hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace62378cbfe5cfecd3b44aade45c41122ca7ea8 | 4,270 | py | Python | lib/Export_HTML.py | x211321/VGC_Analyze | e876d83f5682d0eae111478887e0c0a607055130 | [
"MIT"
] | null | null | null | lib/Export_HTML.py | x211321/VGC_Analyze | e876d83f5682d0eae111478887e0c0a607055130 | [
"MIT"
] | null | null | null | lib/Export_HTML.py | x211321/VGC_Analyze | e876d83f5682d0eae111478887e0c0a607055130 | [
"MIT"
] | null | null | null | import lib.Settings as settings
import os
import lib.Var as VAR
from lib.Browser import openBrowser
class Export_HTML(object):
    """Export the contents of a treeview-style item view to an HTML table.

    The page is written to ``VAR.EXPORT_PATH/collection_export.html`` together
    with a default stylesheet (``export.css``), then opened in the browser.
    """

    def __init__(self, itemView):
        # itemView is expected to provide get_children()/item() in the style
        # of a ttk.Treeview widget.
        self.itemView = itemView

    def export(self):
        """Collect all rows from the item view, render them as HTML and open
        the resulting file in the user's browser."""
        rows = []
        for item in self.itemView.get_children():
            # When data is grouped, group header ids start with "#"
            if item[0] == "#":
                # Append group
                rows.append({"key": item, "data": self.itemView.item(item)})
                # Iterate child elements
                for child in self.itemView.get_children(item):
                    rows.append({"key": child, "data": self.itemView.item(child)})
            else:
                rows.append({"key": item, "data": self.itemView.item(item)})
        # Get active columns; an empty "columns" setting means all columns
        temp = settings.get("display", "columns", [])
        activeColumns = []
        for column in VAR.VIEW_COLUMNS:
            if column in temp or len(temp) == 0:
                activeColumns.append(column)
        # Generate HTML
        htmlContent = '<html>\n'
        htmlContent += '\t<head>\n'
        htmlContent += '\t\t<link rel="stylesheet" href="export.css">\n'
        htmlContent += '\t</head>\n'
        htmlContent += '\t<body>\n'
        htmlContent += '\t\t<table class="table">\n'
        htmlContent += '\t\t\t<tr class="header_row">\n'
        for column in activeColumns:
            htmlContent += '\t\t\t\t<th class="header_cell" id="'+self.getID(column)+'">' + VAR.VIEW_COLUMNS[column]["name"] + '</th>\n'
        htmlContent += '\t\t\t</tr>\n'
        for row in rows:
            if row["key"][0] == "#":
                # Group rows span the whole table width
                htmlContent += '\t\t\t<tr class="group_header_row">\n'
                htmlContent += '\t\t\t\t<td class="group_header" colspan="'+str(len(activeColumns))+'">' + row["key"][1:] + ' ' + self.getColumnValue("Title", row) + ' ' + self.getColumnValue("Price", row) + '</td>\n'
                htmlContent += '\t\t\t</tr>\n'
            else:
                htmlContent += '\t\t\t<tr class="item_row">\n'
                for column in activeColumns:
                    htmlContent += '\t\t\t\t<td class="item_cell" id="'+self.getID(column)+'">' + self.getColumnValue(column, row) + '</td>\n'
                htmlContent += '\t\t\t</tr>\n'
        htmlContent += '\t\t</table>\n'
        htmlContent += "\t</body>\n"
        htmlContent += "</html>\n"
        # Save to file (os.path.join is robust against EXPORT_PATH lacking a
        # trailing separator, unlike string concatenation)
        if not os.path.exists(VAR.EXPORT_PATH):
            os.makedirs(VAR.EXPORT_PATH)
        exportFile = os.path.join(VAR.EXPORT_PATH, "collection_export.html")
        with open(exportFile, "w", encoding="utf-8") as file:
            file.write(htmlContent)
        # Generate css
        self.generateCss()
        # Open in browser
        openBrowser(os.path.realpath(exportFile))

    def getColumnValue(self, columnKey, row):
        """Return the cell value of the given column for a treeview row dict.

        The +2 offset skips leading entries in the row's value list that do
        not correspond to VIEW_COLUMNS (matches the treeview layout).
        """
        index = list(VAR.VIEW_COLUMNS.keys()).index(columnKey) + 2
        return row["data"]["values"][index]

    def getID(self, column):
        """Turn a column title into a css-friendly id:
        lowercase, spaces to underscores, parentheses removed."""
        return column.lower().replace(" ", "_").replace("(", "").replace(")", "")

    def generateCss(self):
        """Write the default export.css next to the exported HTML."""
        css = """
            table {
                margin-left: auto;
                margin-right: auto;
                font-family: 'Lucida Sans', 'Lucida Sans Regular', 'Lucida Grande', 'Lucida Sans Unicode', Geneva, Verdana, sans-serif;
            }
            table, th, td {
                border-bottom: 1px solid #C0C0C0;
            }
            th {
                text-align:left;
            }
            td, th {
                padding-right: 2rem;
            }
            #price{
                text-align: right;
            }
            .group_header {
                background-color: #F0F0F0;
                font-weight: 700;
                font-size: larger;
            }
            """
        if not os.path.exists(VAR.EXPORT_PATH):
            os.makedirs(VAR.EXPORT_PATH)
        # Only write css when file doesn't exist yet
        # gives the user the option to place custom css
        cssFile = os.path.join(VAR.EXPORT_PATH, "export.css")
        if not os.path.exists(cssFile):
            with open(cssFile, "w") as file:
                file.write(css)
| 32.59542 | 217 | 0.503279 |
ace62454833b49d69017d400053b1b8cabec8aee | 5,295 | py | Python | tests/clvm/coin_store.py | WarutaShinken/staidelta-blockchain | ab6fd7d0ea93ac8b1b293240aab18db8db34718d | [
"Apache-2.0"
] | 1 | 2022-03-02T12:36:42.000Z | 2022-03-02T12:36:42.000Z | tests/clvm/coin_store.py | WarutaShinken/staidelta-blockchain | ab6fd7d0ea93ac8b1b293240aab18db8db34718d | [
"Apache-2.0"
] | null | null | null | tests/clvm/coin_store.py | WarutaShinken/staidelta-blockchain | ab6fd7d0ea93ac8b1b293240aab18db8db34718d | [
"Apache-2.0"
] | null | null | null | from collections import defaultdict
from dataclasses import dataclass, replace
from typing import Dict, Iterator, Optional
from staidelta.util.condition_tools import created_outputs_for_conditions_dict
from staidelta.full_node.mempool_check_conditions import mempool_check_conditions_dict, get_name_puzzle_conditions
from staidelta.types.blockchain_format.coin import Coin
from staidelta.types.blockchain_format.sized_bytes import bytes32
from staidelta.types.coin_record import CoinRecord
from staidelta.types.spend_bundle import SpendBundle
from staidelta.util.ints import uint32, uint64
from staidelta.full_node.bundle_tools import simple_solution_generator
from staidelta.util.errors import Err
from staidelta.consensus.cost_calculator import NPCResult
# Maximum cost budget for spend-bundle validation in these tests
# (presumably CLVM execution-cost units -- confirm against callers).
MAX_COST = 11000000000
class BadSpendBundleError(Exception):
    """Raised when a spend bundle fails condition or consensus validation."""
@dataclass
class CoinTimestamp:
    """Point in chain time at which a coin event happens.

    seconds -- wall-clock timestamp
    height  -- block height
    """
    seconds: int
    height: int
class CoinStore:
    """In-memory coin database used by CLVM tests.

    Keeps a primary map from coin name (id) to :class:`CoinRecord` and a
    secondary index from puzzle hash to the names of coins paying to it.
    Supports farming new coins, validating spend bundles against their
    conditions and applying validated bundles to the store.
    """
    def __init__(self, reward_mask: int = 0):
        # Primary store: coin name (id) -> CoinRecord.
        self._db: Dict[bytes32, CoinRecord] = dict()
        # Secondary index: puzzle hash -> list of coin names.
        self._ph_index: Dict = defaultdict(list)
        # NOTE(review): _reward_mask is stored but never read in this class;
        # presumably consumed by callers or kept for API compatibility.
        self._reward_mask = reward_mask
    def farm_coin(
        self,
        puzzle_hash: bytes32,
        birthday: CoinTimestamp,
        amount: int = 1024,
        prefix=bytes32.fromhex("ccd5bb71183532bff220ba46c268991a00000000000000000000000000000000"), # noqa
    ) -> Coin:
        """Create a new coin out of thin air at `birthday` and record it.

        The synthetic parent id is the fixed prefix OR-ed byte-wise with the
        big-endian encoding of the block height, giving a deterministic
        parent per height.
        """
        parent = bytes32(
            [
                a | b
                for a, b in zip(
                    prefix,
                    birthday.height.to_bytes(32, "big"),
                )
            ],
        )
        # parent = birthday.height.to_bytes(32, "big")
        coin = Coin(parent, puzzle_hash, uint64(amount))
        self._add_coin_entry(coin, birthday)
        return coin
    def validate_spend_bundle(
        self,
        spend_bundle: SpendBundle,
        now: CoinTimestamp,
        max_cost: int,
        cost_per_byte: int,
    ) -> int:
        """Validate `spend_bundle` at time `now`.

        Raises BadSpendBundleError on any condition failure or missing coin;
        returns 0 on success.
        """
        # this should use blockchain consensus code
        program = simple_solution_generator(spend_bundle)
        result: NPCResult = get_name_puzzle_conditions(program, max_cost, cost_per_byte=cost_per_byte, safe_mode=True)
        if result.error is not None:
            raise BadSpendBundleError(f"condition validation failure {Err(result.error)}")
        # Build an ephemeral view of the db that includes coins created by
        # this very bundle, so spends of freshly created coins can validate.
        ephemeral_db = dict(self._db)
        for npc in result.npc_list:
            for coin in created_outputs_for_conditions_dict(npc.condition_dict, npc.coin_name):
                name = coin.name()
                ephemeral_db[name] = CoinRecord(
                    coin,
                    uint32(now.height),
                    uint32(0),
                    False,
                    False,
                    uint64(now.seconds),
                )
        # Check every spend's conditions against the (ephemeral) records.
        for npc in result.npc_list:
            prev_transaction_block_height = uint32(now.height)
            timestamp = uint64(now.seconds)
            coin_record = ephemeral_db.get(npc.coin_name)
            if coin_record is None:
                raise BadSpendBundleError(f"coin not found for id 0x{npc.coin_name.hex()}") # noqa
            err = mempool_check_conditions_dict(
                coin_record,
                npc.condition_dict,
                prev_transaction_block_height,
                timestamp,
            )
            if err is not None:
                raise BadSpendBundleError(f"condition validation failure {Err(err)}")
        return 0
    def update_coin_store_for_spend_bundle(
        self,
        spend_bundle: SpendBundle,
        now: CoinTimestamp,
        max_cost: int,
        cost_per_byte: int,
    ):
        """Validate `spend_bundle` and, on success, apply it to the store.

        Returns (additions, coin_spends) of the applied bundle.
        """
        err = self.validate_spend_bundle(spend_bundle, now, max_cost, cost_per_byte)
        # NOTE(review): validate_spend_bundle either raises or returns 0, so
        # this branch is effectively unreachable; kept defensively.
        if err != 0:
            raise BadSpendBundleError(f"validation failure {err}")
        additions = spend_bundle.additions()
        removals = spend_bundle.removals()
        # Record newly created coins.
        for new_coin in additions:
            self._add_coin_entry(new_coin, now)
        # Mark spent coins as spent at the current height.
        for spent_coin in removals:
            coin_name = spent_coin.name()
            coin_record = self._db[coin_name]
            self._db[coin_name] = replace(coin_record, spent_block_index=now.height, spent=True)
        return additions, spend_bundle.coin_spends
    def coins_for_puzzle_hash(self, puzzle_hash: bytes32) -> Iterator[Coin]:
        """Yield every coin (spent or not) paying to `puzzle_hash`."""
        for coin_name in self._ph_index[puzzle_hash]:
            coin_entry = self._db[coin_name]
            assert coin_entry.coin.puzzle_hash == puzzle_hash
            yield coin_entry.coin
    def all_coins(self) -> Iterator[Coin]:
        """Yield every coin in the store, spent or not."""
        for coin_entry in self._db.values():
            yield coin_entry.coin
    def all_unspent_coins(self) -> Iterator[Coin]:
        """Yield only the coins that have not been spent yet."""
        for coin_entry in self._db.values():
            if not coin_entry.spent:
                yield coin_entry.coin
    def _add_coin_entry(self, coin: Coin, birthday: CoinTimestamp) -> None:
        """Insert a CoinRecord for `coin` created at `birthday` and index it
        by puzzle hash. Silently overwrites an existing entry of the same name."""
        name = coin.name()
        # assert name not in self._db
        self._db[name] = CoinRecord(
            coin,
            uint32(birthday.height),
            uint32(0),
            False,
            False,
            uint64(birthday.seconds),
        )
        self._ph_index[coin.puzzle_hash].append(name)
    def coin_record(self, coin_id: bytes32) -> Optional[CoinRecord]:
        """Return the CoinRecord for `coin_id`, or None if unknown."""
        return self._db.get(coin_id)
ace6253640c21204dcb9c8e4765a3d443ce6c588 | 4,360 | py | Python | src/gradcam.py | QuinnQiao/pytorch-cnn-visualizations | bcbaf39899a976eb80ff2cc2f078c1445f2fa6a4 | [
"MIT"
] | null | null | null | src/gradcam.py | QuinnQiao/pytorch-cnn-visualizations | bcbaf39899a976eb80ff2cc2f078c1445f2fa6a4 | [
"MIT"
] | null | null | null | src/gradcam.py | QuinnQiao/pytorch-cnn-visualizations | bcbaf39899a976eb80ff2cc2f078c1445f2fa6a4 | [
"MIT"
] | null | null | null | """
Created on Thu Oct 26 11:06:51 2017
@author: Utku Ozbulak - github.com/utkuozbulak
"""
from PIL import Image
import numpy as np
import torch
from misc_functions import get_example_params, save_class_activation_images
class CamExtractor():
    """
    Runs the model forward while capturing the activations of the chosen
    convolutional layer and hooking its gradients for later retrieval.
    """
    def __init__(self, model, target_layer):
        self.model = model
        self.target_layer = target_layer
        self.gradients = None

    def save_gradient(self, grad):
        # Hook callback: remember the gradient flowing back through the
        # target layer's output.
        self.gradients = grad

    def forward_pass_on_convolutions(self, x):
        """
        Run x through model.features, registering a gradient hook on the
        target layer. Returns (target layer activation, final feature output).
        """
        target_activation = None
        for layer_index, layer in self.model.features._modules.items():
            x = layer(x)
            if int(layer_index) == self.target_layer:
                x.register_hook(self.save_gradient)
                target_activation = x
        return target_activation, x

    def forward_pass(self, x):
        """
        Full forward pass: convolutional features, flatten, classifier head.
        Returns (target layer activation, classifier output).
        """
        target_activation, features_out = self.forward_pass_on_convolutions(x)
        flattened = features_out.view(features_out.size(0), -1)
        scores = self.model.classifier(flattened)
        return target_activation, scores
class GradCam():
    """
    Produces class activation map
    """
    def __init__(self, model, target_layer):
        self.model = model
        self.model.eval()
        # Define extractor
        self.extractor = CamExtractor(self.model, target_layer)
    def generate_cam(self, input_image, target_class=None):
        """Return a Grad-CAM heatmap as a uint8 array in [0, 255], resized to
        the spatial size of `input_image`. If `target_class` is None, the
        model's top-scoring class is used."""
        # Full forward pass
        # conv_output is the output of convolutions at specified layer
        # model_output is the final output of the model (1, 1000)
        conv_output, model_output = self.extractor.forward_pass(input_image)
        if target_class is None:
            target_class = np.argmax(model_output.data.numpy())
        # Target for backprop: one-hot vector selecting the class score
        one_hot_output = torch.FloatTensor(1, model_output.size()[-1]).zero_()
        one_hot_output[0][target_class] = 1
        # Zero grads
        self.model.features.zero_grad()
        self.model.classifier.zero_grad()
        # Backward pass with specified target
        model_output.backward(gradient=one_hot_output, retain_graph=True)
        # Get hooked gradients (captured by CamExtractor.save_gradient)
        guided_gradients = self.extractor.gradients.data.numpy()[0]
        # Get convolution outputs
        target = conv_output.data.numpy()[0]
        # Get weights from gradients
        weights = np.mean(guided_gradients, axis=(1, 2)) # Take averages for each gradient
        # Create empty numpy array for cam
        # cam = np.ones(target.shape[1:], dtype=np.float32)
        cam = np.zeros(target.shape[1:], dtype=np.float32)
        # Multiply each weight with its conv output and then, sum
        for i, w in enumerate(weights):
            cam += w * target[i, :, :]
        # ReLU: keep only positive contributions
        cam = np.maximum(cam, 0)
        # NOTE(review): divides by (max - min); a constant cam would divide
        # by zero -- assumed not to occur for real inputs.
        cam = (cam - np.min(cam)) / (np.max(cam) - np.min(cam)) # Normalize between 0-1
        cam = np.uint8(cam * 255) # Scale between 0-255 to visualize
        cam = np.uint8(Image.fromarray(cam).resize((input_image.shape[2],
                       input_image.shape[3]), Image.ANTIALIAS))/255
        # ^ I am extremely unhappy with this line. Originally resizing was done in cv2 which
        # supports resizing numpy matrices with antialiasing, however,
        # when I moved the repository to PIL, this option was out of the window.
        # So, in order to use resizing with ANTIALIAS feature of PIL,
        # I briefly convert matrix to PIL image and then back.
        # If there is a more beautiful way, do not hesitate to send a PR.
        return cam
if __name__ == '__main__':
    # Demo: compute and save a Grad-CAM visualisation for a bundled example.
    # Get params
    # target_example = 0 # Snake
    target_example = 1 # cat_dog
    (original_image, prep_img, target_class, file_name_to_export, pretrained_model) =\
        get_example_params(target_example)
    # Grad cam; target_layer indexes into model.features
    # (presumably a late conv layer of the example model -- confirm).
    grad_cam = GradCam(pretrained_model, target_layer=11)
    # Generate cam mask
    cam = grad_cam.generate_cam(prep_img, target_class)
    # Save mask
    save_class_activation_images(original_image, cam, file_name_to_export)
    print('Grad cam completed')
| 38.584071 | 92 | 0.644725 |
ace6261a8e45094959bda276c4f39afd4c2cf93e | 12,957 | py | Python | thetis/optimisation.py | connorjward/thetis | 0fcfae106587adf0c71fcdfedb3e3a3cf8c609ff | [
"MIT"
] | 45 | 2016-04-13T22:33:19.000Z | 2022-03-17T22:22:15.000Z | thetis/optimisation.py | connorjward/thetis | 0fcfae106587adf0c71fcdfedb3e3a3cf8c609ff | [
"MIT"
] | 188 | 2016-02-17T06:14:37.000Z | 2022-03-18T10:46:49.000Z | thetis/optimisation.py | connorjward/thetis | 0fcfae106587adf0c71fcdfedb3e3a3cf8c609ff | [
"MIT"
] | 26 | 2016-04-26T15:03:47.000Z | 2022-02-04T16:28:01.000Z | """
Some classes to help optimisation problems formulated with thetis_adjoint.
In particular this module contains some OptimisationCallbacks that can be used
as callbacks of a :class:`ReducedFunctional` called at various stages during the optimisation
process:
- eval_cb_pre(controls) and eval_cb_post(functional, controls) called before and after (re)evaluation of the forward model
- derivative_cb_pre(controls) and eval_cb_post(functional, derivative, controls) called before and after the gradient computation using the adjoint of the model
- hessian_cb_pre(controls) and eval_cb_post(functional, derivative, controls) called before and after the hessian computation
OptimisationCallbacks that (can) use controls, functional and derivative information, work out
what is provided by the number of arguments: current control values are always in the last argument;
if more than 2 arguments are provided, the first is the latest evaluated functional value.
"""
from firedrake import *
from .callback import DiagnosticCallback
from .exporter import ExportManager
import thetis.field_defs as field_defs
from abc import abstractmethod
import numpy
class UserExportManager(ExportManager):
    """
    ExportManager for user provided functions (not necessarily known to Thetis)

    In the standard :class:`.ExportManager` all provided functions need to have standard names
    present in :py:data:`.field_metadata`. Here, any functions can be provided. If function.name() is in
    :py:data:`.field_metadata`, the standard filename and shortname are used.
    If the function.name() is unknown, both are based on function.name()
    directly (with an optional additional filename_prefix). Filenames and
    shortnames can be overruled by the shortnames and filenames arguments."""
    def __init__(self, solver_obj_or_outputdir, functions_to_export,
                 filenames=None, filename_prefix='',
                 shortnames=None, **kwargs):
        """
        :arg solver_obj_or_outputdir: a :class:`.FlowSolver2d` object, used to determine the output directory. Alternatively, the
            outputdir can be specified with a string as the first argument.
        :arg functions_to_export: a list of :class:`Function` s
        :arg filenames: a list of strings that specify the filename for each provided function. If not provided,
            filenames are based on function.name().
        :arg filename_prefix: a string prefixed to each filename
        :arg shortnames: a list of strings with the shortnames used for each provided function. If not provided,
            shortnames are based on function.name().
        :arg kwargs: any further keyword arguments are passed on to :class:`.ExportManager`"""
        try:
            outputdir = solver_obj_or_outputdir.options.output_directory
        except AttributeError:
            # a plain output directory string was passed instead of a solver
            outputdir = solver_obj_or_outputdir
        if shortnames is None:
            field_name_list = [function.name() for function in functions_to_export]
        else:
            field_name_list = shortnames
        field_dict = {}
        field_metadata = {}
        for field_name, function in zip(field_name_list, functions_to_export):
            field_dict[field_name] = function
            if shortnames is None and field_name in field_defs.field_metadata:
                # fields known to Thetis: take the standard shortname from
                # their metadata entry (field_defs.field_metadata maps field
                # names to metadata dicts)
                field_metadata[field_name] = {'shortname': field_defs.field_metadata[field_name]['shortname']}
            else:
                field_metadata[field_name] = {'shortname': field_name}
        if filenames is None:
            for field_name in field_name_list:
                if field_name in field_defs.field_metadata:
                    # bugfix: index the per-field metadata entry, not the
                    # top-level table
                    field_metadata[field_name]['filename'] = field_defs.field_metadata[field_name]['filename']
                else:
                    field_metadata[field_name]['filename'] = filename_prefix + field_name
        else:
            # bugfix: 'zipt' was a NameError typo for zip()
            for field_name, filename in zip(field_name_list, filenames):
                field_metadata[field_name]['filename'] = filename
        super().__init__(outputdir, field_name_list, field_dict, field_metadata, **kwargs)
class DeferredExportManager(object):
    """
    Wraps a :class:`.UserExportManager` whose construction is postponed until
    the first export() call.

    The functions handed to each export() call are copied into a fixed set of
    functions owned by this object; later calls may therefore pass different
    functions, as long as they live in the same function spaces. Used by
    :class:`.ControlsExportOptimisationCallback` and
    :class:`.DerivativesExportOptimisationCallback`."""
    def __init__(self, solver_obj_or_outputdir, **kwargs):
        """
        :arg solver_obj_or_outputdir: a :class:`.FlowSolver2d` object, used to determine the output directory. Alternatively, the
            outputdir can be specified with a string as the first argument.
        :arg kwargs: any further keyword arguments are passed on to :class:`.UserExportManager`"""
        self.solver_obj_or_outputdir = solver_obj_or_outputdir
        self.kwargs = kwargs
        self.export_manager = None

    def export(self, functions, suggested_names=None):
        """
        Create the :class:`.UserExportManager` on first use, copy the given
        values into the owned functions, and export them.

        :arg functions: a :class:`Function` or list thereof. The same function
            spaces must be used on every call.
        """
        try:
            len(functions)
        except (TypeError, NotImplementedError):
            # a single function was passed: wrap it
            functions = [functions]
        if self.export_manager is None:
            if suggested_names is None:
                names = [function.name() for function in functions]
            else:
                names = suggested_names
            self.functions = [Function(function.function_space(), name=name)
                              for function, name in zip(functions, names)]
            self.export_manager = UserExportManager(self.solver_obj_or_outputdir,
                                                    self.functions, **self.kwargs)
        for target, source in zip(self.functions, functions):
            assert target.function_space() is source.function_space()
            target.assign(source)
        self.export_manager.export()
class UserExportOptimisationCallback(UserExportManager):
    """A :class:`.UserExportManager` that can be used as a :class:`ReducedFunctional` callback
    Any callback arguments (functional value, derivatives, controls) are ignored"""
    def __init__(self, solver_obj_or_outputdir, functions_to_export, **kwargs):
        """
        :arg solver_obj_or_outputdir: a :class:`.FlowSolver2d` object, used to determine the output directory. Alternatively, the
            outputdir can be specified with a string as the first argument.
        :arg functions_to_export: a list of :class:`Function` s
        :arg kwargs: any further keyword arguments are passed on to :class:`.UserExportManager`"""
        kwargs.setdefault('filename_prefix', 'optimisation_') # use prefix to avoid overwriting forward model output
        super().__init__(solver_obj_or_outputdir, functions_to_export, **kwargs)
        # we need to maintain the original functions in the dict as it
        # is their block_variables (representing the current "end"-state)
        # that determine what will be written
        # (self.functions / self.fields_to_export are presumably populated by
        # the ExportManager base class -- confirm there)
        self.orig_functions = self.functions.copy()
    def __call__(self, *args):
        """
        Ensure the :class:`.UserExportManager` uses the checkpointed values and call its export().
        :args: these are ignored"""
        for name in self.fields_to_export:
            # swap in the value recorded on the adjoint tape for each field
            self.functions[name] = self.orig_functions[name].block_variable.saved_output
        self.export()
class ControlsExportOptimisationCallback(DeferredExportManager):
    """Exports the current control values (assumed to all be :class:`Function` s).

    As in every :class:`ReducedFunctional` callback, the controls are the last
    positional argument of the call."""
    def __init__(self, solver_obj_or_outputdir, **kwargs):
        """
        :arg solver_obj_or_outputdir: a :class:`.FlowSolver2d` object, used to determine the output directory. Alternatively, the
            outputdir can be specified with a string as the first argument.
        :arg kwargs: any further keyword arguments are passed on to :class:`.UserExportManager`"""
        if 'filename_prefix' not in kwargs:
            kwargs['filename_prefix'] = 'control_'
        super().__init__(solver_obj_or_outputdir, **kwargs)

    def __call__(self, *args):
        # controls always come last in ReducedFunctional callbacks
        controls = args[-1]
        self.export(controls)
class DerivativesExportOptimisationCallback(DeferredExportManager):
    """Exports the derivatives calculated by the adjoint.

    The derivatives are taken from the second callback argument, so this class
    is suitable as a derivative_cb_post callback of a :class:`ReducedFunctional`."""
    def __init__(self, solver_obj_or_outputdir, **kwargs):
        """
        :arg solver_obj_or_outputdir: a :class:`.FlowSolver2d` object, used to determine the output directory. Alternatively, the
            outputdir can be specified with a string as the first argument.
        :arg kwargs: any further keyword arguments are passed on to :class:`.UserExportManager`"""
        if 'filename_prefix' not in kwargs:
            kwargs['filename_prefix'] = 'derivative_'
        super().__init__(solver_obj_or_outputdir, **kwargs)

    def __call__(self, *args):
        # derivative_cb_post provides (functional, derivatives, controls)
        if len(args) != 3:
            raise TypeError("DerivativesExportOptimsationCallback called with wrong number of arguments: should be used for derivative_cb_post callback only.")
        controls = args[-1]
        try:
            # name the exported fields after the corresponding controls
            names = [control.name() for control in controls]
        except (TypeError, NotImplementedError):
            # a single control rather than a list of controls
            names = [controls.name()]
        self.export(args[1], suggested_names=names)
class OptimisationCallbackList(list):
    """A list of optimisation callbacks that is itself callable.

    Invoking the list forwards the call arguments to every contained
    callback, in list order."""
    def __call__(self, *args):
        for cb in self:
            cb(*args)
class DiagnosticOptimisationCallback(DiagnosticCallback):
    """
    An OptimisationCallback similar to :class:`.DiagnosticCallback` that can be used as callback in a :class:`ReducedFunctional`.
    Note that in this case the computing of the values needs to be defined in the compute_values method,
    not in the __call__ method (as this one is directly called from the :class:`ReducedFunctional`). In addition,
    like any :class:`.DiagnosticCallback`, the name and variable_names properties and a message_str method need to be defined.
    """
    def __init__(self, solver_obj, **kwargs):
        """
        :arg solver_obj: Thetis solver object
        :arg kwargs: keyword arguments passed to :class:`.DiagnosticCallback`
        """
        # optimisation callbacks are not tied to simulation time
        kwargs.setdefault('include_time', False)
        super().__init__(solver_obj, **kwargs)
    @abstractmethod
    def compute_values(self, *args):
        """
        Compute diagnostic values.
        This method is to be implemented in concrete subclasses of a :class:`.DiagnosticOptimisationCallback`.
        The number of arguments varies depending on which of the 6 [eval|derivative|hessian]_cb_[pre|post] callbacks
        this is used as. The last argument always contains the current controls. In the "pre" callbacks this is
        the only argument. In all "post" callbacks the 0th argument is the current functional value. eval_cb_post
        is given two arguments: functional and controls. derivative_cb_post and hessian_cb_post are given three
        arguments with args[1] being the derivative/hessian just calculated."""
        pass
    def evaluate(self, *args, index=None):
        """Evaluates callback and pushes values to log and hdf file (if enabled)

        NOTE(review): the `index` keyword is accepted but unused here;
        presumably kept for DiagnosticCallback API compatibility."""
        values = self.compute_values(*args)
        # "pre" callbacks carry no functional value yet; record NaN then
        if len(args) > 0:
            functional = args[0]
        else:
            functional = numpy.nan
        if self.append_to_log:
            self.push_to_log(functional, values)
        if self.append_to_hdf5:
            self.push_to_hdf5(functional, values)
    def __call__(self, *args):
        # called directly by the ReducedFunctional
        self.evaluate(*args)
class FunctionalOptimisationCallback(DiagnosticOptimisationCallback):
    """
    Records the functional value in the log and/or hdf5 file.

    Only usable as a "_post" callback, since "pre" callbacks do not carry a
    functional value."""
    variable_names = ['functional']
    name = 'functional'

    def compute_values(self, *args):
        if not args:
            raise TypeError('FunctionalOptimisationCallback can be used as _post callback only.')
        return [args[0]]

    def message_str(self, functional):
        return f'Functional value: {functional}'
| 50.416342 | 161 | 0.699236 |
ace6268561260b0c945ce281d31786290caa205c | 520 | py | Python | main_app/migrations/0009_auto_20200331_1135.py | wszoltysek/give_things | 240266460f0d7b7777cdaa8383edce80ea9e6024 | [
"MIT"
] | null | null | null | main_app/migrations/0009_auto_20200331_1135.py | wszoltysek/give_things | 240266460f0d7b7777cdaa8383edce80ea9e6024 | [
"MIT"
] | null | null | null | main_app/migrations/0009_auto_20200331_1135.py | wszoltysek/give_things | 240266460f0d7b7777cdaa8383edce80ea9e6024 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.7 on 2020-03-31 11:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Replaces Donation.institution: the existing field is removed and
    # re-added as a many-to-many relation to Institution.
    dependencies = [
        ('main_app', '0008_auto_20200331_1116'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='donation',
            name='institution',
        ),
        migrations.AddField(
            model_name='donation',
            name='institution',
            field=models.ManyToManyField(to='main_app.Institution'),
        ),
    ]
| 22.608696 | 68 | 0.588462 |
ace626c1e9624d18a21bf43025c731108f40c20f | 136 | py | Python | robson_api/app.py | RobsonAlessio/robson-api | 909bb136b61d91f36c0d0e8de08102966c840292 | [
"Unlicense"
] | null | null | null | robson_api/app.py | RobsonAlessio/robson-api | 909bb136b61d91f36c0d0e8de08102966c840292 | [
"Unlicense"
] | null | null | null | robson_api/app.py | RobsonAlessio/robson-api | 909bb136b61d91f36c0d0e8de08102966c840292 | [
"Unlicense"
] | null | null | null | """
robson_api base module.
"""
from fastapi import FastAPI
app = FastAPI()


@app.get("/")
def index():
    """Serve a static greeting at the API root."""
    payload = {"Hello": "World"}
    return payload
| 11.333333 | 29 | 0.617647 |
ace62870301876f1f5abd4b74b111d02d863ee9f | 9,408 | py | Python | src/kapidox/utils.py | KDE/kapidox | 0c590019ffbf4768232a941bea13bb9c981086a0 | [
"BSD-2-Clause"
] | 7 | 2015-12-14T09:18:09.000Z | 2020-07-30T17:39:46.000Z | src/kapidox/utils.py | KDE/kapidox | 0c590019ffbf4768232a941bea13bb9c981086a0 | [
"BSD-2-Clause"
] | null | null | null | src/kapidox/utils.py | KDE/kapidox | 0c590019ffbf4768232a941bea13bb9c981086a0 | [
"BSD-2-Clause"
] | 1 | 2020-04-13T18:04:03.000Z | 2020-04-13T18:04:03.000Z | # -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: 2014 Aurélien Gâteau <agateau@kde.org>
#
# SPDX-License-Identifier: BSD-2-Clause
from fnmatch import fnmatch
import logging
import os
import re
import subprocess
import shutil
import sys
import tempfile
import requests
## @package kapidox.utils
#
# Multiple usage utils.
#
# This module contains code which is shared between depdiagram-prepare and
# other components.
#
# Code in this dir should not import any module which is not shipped with
# Python because this module is used by depdiagram-prepare, which must be able
# to run on builds.kde.org, which may not have all the required dependencies.
#
def setup_logging():
    """Configure root logging: timestamped messages at DEBUG level."""
    log_format = '%(asctime)s %(levelname)s %(message)s'
    logging.basicConfig(format=log_format, datefmt='%H:%M:%S', level=logging.DEBUG)
def tolist(a):
    """Return `a` unchanged if it is a list, otherwise wrap it in one.

    Uses isinstance rather than an exact type check so that list subclasses
    are also passed through without re-wrapping.
    """
    return a if isinstance(a, list) else [a]
def serialize_name(name):
    """Return a serialized name: lower-cased, with spaces turned into
    underscores. Returns None when `name` is None."""
    if name is None:
        return None
    return name.lower().replace(' ', '_')
def set_repopath(id):
    """ Return the repopath for the repo id, queried from projects.kde.org

    Args:
        id: unique KDE repo identifier; may be None.

    Returns:
        The repository path string, or None when the id is None or the
        lookup fails for any reason.
    """
    if id is None:
        return None
    try:
        response = requests.get('https://projects.kde.org/api/v1/identifier/' + id)
        return response.json()['repo']
    except Exception as exc:
        # Deliberately catch everything: a failed lookup must never abort
        # the caller.
        logging.warning("Failed to get repository url for '{}' from projects.kde.org: {}".format(id, exc))
        # No canonical repo identifier exists for this checkout (e.g. the
        # repository was checked out under a non-canonical directory name
        # such as KItemViews or kitemviews.git instead of kitemviews), so
        # there is nothing sensible to return.
        return None
def set_maintainers(maintainer_keys, all_maintainers):
    """ Expand maintainer keys into full maintainer entries.

    Args:
        maintainer_keys: (string or list of strings) key(s) identifying the
            maintainer(s) in `all_maintainers`; may be None or empty.
        all_maintainers: (dict of dict) look-up table where the names and
            emails of the maintainers are stored, keyed by maintainer key.

    Returns:
        A list of maintainer dicts. Unknown keys are silently dropped.

    Examples:
        >>> maintainer_keys = ['arthur', 'toto']
        >>> my_team = {'arthur': {'name': 'Arthur Pendragon',
        ...                       'email': 'arthur@example.com'},
        ...            'toto': {'name': 'Toto',
        ...                     'email': 'toto123@example.com'}}
        >>> set_maintainers(maintainer_keys, my_team)
    """
    if not maintainer_keys:
        return []
    if not isinstance(maintainer_keys, list):
        maintainer_keys = [maintainer_keys]
    # drop keys with no entry in the look-up table
    found = (all_maintainers.get(key) for key in maintainer_keys)
    return [maintainer for maintainer in found if maintainer is not None]
def parse_fancyname(fw_dir):
    """Return the framework name for a given source dir

    The framework name is the name of the toplevel CMake project, read from
    CMakeLists.txt. If no CMakeLists.txt exists, fall back to the name of the
    first non-test .qbs project file found. Returns None (and logs an error)
    when neither yields a name.
    """
    cmakelists_path = os.path.join(fw_dir, "CMakeLists.txt")
    if not os.path.exists(cmakelists_path):
        for f in os.listdir(fw_dir):
            if ".qbs" in f and "Test" not in f:
                # strip the ".qbs" extension (assumes it is a suffix)
                return f[:-4]
        logging.error("No CMakeLists.txt in {}".format(fw_dir))
        return None
    # match e.g. "project(KCoreAddons ...)" at the start of a line,
    # case-insensitively
    project_re = re.compile(r"^\s*project\s*\(\s*([\w\-\_]+)", re.I | re.M)
    with open(cmakelists_path) as f:
        cmakelists_content = f.read()
    match = project_re.search(cmakelists_content)
    if match:
        return match.group(1)
    logging.error("Failed to find framework name: Could not find a "
                  "'project()' command in {}.".format(cmakelists_path))
    return None
def cache_dir():
    """Find/create a semi-long-term cache directory.

    We do not use tempdir, except as a fallback, because temporary directories
    are intended for files that only last for the program's execution.

    Returns:
        (string) path of an existing cache directory (created if needed).
    """
    cachedir = None
    if sys.platform == 'darwin':
        try:
            from AppKit import NSSearchPathForDirectoriesInDomains
            # http://developer.apple.com/DOCUMENTATION/Cocoa/Reference/Foundation/Miscellaneous/Foundation_Functions/Reference/reference.html#//apple_ref/c/func/NSSearchPathForDirectoriesInDomains
            # NSApplicationSupportDirectory = 14
            # NSUserDomainMask = 1
            # True for expanding the tilde into a fully qualified path
            cachedir = os.path.join(
                NSSearchPathForDirectoriesInDomains(14, 1, True)[0],
                'KApiDox')
        except Exception:
            # AppKit may be unavailable (non-framework Python); fall through
            # to the generic tempdir fallback below. Narrowed from a bare
            # except so KeyboardInterrupt/SystemExit still propagate.
            pass
    elif os.name == "posix":
        if 'HOME' in os.environ and os.path.exists(os.environ['HOME']):
            cachedir = os.path.join(os.environ['HOME'], '.cache', 'kapidox')
    elif os.name == "nt":
        if 'APPDATA' in os.environ and os.path.exists(os.environ['APPDATA']):
            cachedir = os.path.join(os.environ['APPDATA'], 'KApiDox')
    if cachedir is None:
        # last resort: per-execution temporary location
        cachedir = os.path.join(tempfile.gettempdir(), 'kapidox')
    if not os.path.isdir(cachedir):
        os.makedirs(cachedir)
    return cachedir
def svn_export(remote, local, overwrite=False):
    """Wraps svn export.

    Args:
        remote: (string) the remote url.
        local: (string) the local path where to download.
        overwrite: (bool) whether to overwrite `local` or not. (optional,
            default = False)

    Returns:
        True if success, False if no svn client could be found.

    Raises:
        FileNotFoundError:
        subprocess.CalledProcessError:
    """
    try:
        import svn.core
        import svn.client
        logging.debug("Using Python libsvn bindings to fetch %s", remote)
        ctx = svn.client.create_context()
        ctx.auth_baton = svn.core.svn_auth_open([])
        latest = svn.core.svn_opt_revision_t()
        latest.type = svn.core.svn_opt_revision_head
        svn.client.export(remote, local, latest, True, ctx)
    except ImportError:
        logging.debug("Using external svn client to fetch %s", remote)
        cmd = ['svn', 'export', '--quiet']
        if overwrite:
            cmd.append('--force')
        cmd += [remote, local]
        # A failing `svn` invocation raises subprocess.CalledProcessError and
        # is propagated as documented.  (The original re-raised
        # `subprocess.StandardException`, which does not exist and itself
        # crashed with AttributeError.)
        try:
            subprocess.check_call(cmd, stderr=subprocess.STDOUT)
        except FileNotFoundError:
            logging.debug("External svn client not found")
            return False
    # subversion will set the timestamp to match the server
    os.utime(local, None)
    return True
def copy_dir_contents(directory, dest):
    """Copy the contents of a directory

    Args:
        directory: (string) the directory to copy the contents of.
        dest: (string) the directory to copy them into.

    Top-level files matching an ignore pattern (CMakeLists.txt) are skipped,
    and subdirectories are copied recursively with the same patterns ignored;
    an existing destination subdirectory is replaced.
    """
    skip_patterns = ['CMakeLists.txt']
    tree_ignore = shutil.ignore_patterns(*skip_patterns)
    for entry in os.listdir(directory):
        src_path = os.path.join(directory, entry)
        if os.path.isfile(src_path):
            if any(fnmatch(entry, pattern) for pattern in skip_patterns):
                continue
            shutil.copy(src_path, dest)
        elif os.path.isdir(src_path):
            target = os.path.join(dest, entry)
            # copytree refuses to write into an existing directory; clear it.
            if os.path.isdir(target):
                shutil.rmtree(target)
            shutil.copytree(src_path, target, ignore=tree_ignore)
# Memoized result of get_kapidox_version(); None means "not computed yet",
# an empty string means "computed but unavailable".
_KAPIDOX_VERSION = None
def get_kapidox_version():
    """Get commit id of running code if it is running from git repository.

    May return an empty string if it failed to extract the commit id.
    Assumes .git/HEAD looks like this:

        ref: refs/heads/master

    and assumes .git/refs/heads/master contains the commit id
    """
    global _KAPIDOX_VERSION
    # Return the cached value after the first call.
    if _KAPIDOX_VERSION is not None:
        return _KAPIDOX_VERSION
    _KAPIDOX_VERSION = ""
    # The script is assumed to live one level below the repository root, so
    # the .git directory is looked up next to the script's parent directory.
    bin_dir = os.path.dirname(sys.argv[0])
    git_dir = os.path.join(bin_dir, "..", ".git")
    if not os.path.isdir(git_dir):
        # Looks like we are not running from the git repo, exit silently
        return _KAPIDOX_VERSION
    git_HEAD = os.path.join(git_dir, "HEAD")
    if not os.path.isfile(git_HEAD):
        logging.warning("Getting git info failed: {} is not a file".format(git_HEAD))
        return _KAPIDOX_VERSION
    try:
        # "ref: refs/heads/master" -> read the commit id from that ref file.
        # NOTE(review): a detached HEAD has no "ref:" prefix and lands in the
        # except branch below — confirm that is acceptable.
        line = open(git_HEAD).readline()
        ref_name = line.split(": ")[1].strip()
        with open(os.path.join(git_dir, ref_name)) as f:
            _KAPIDOX_VERSION = f.read().strip()
    except Exception as exc:
        # Catch all exceptions here: whatever fails in this function should not
        # cause the code to fail
        logging.warning("Getting git info failed: {}".format(exc))
    return _KAPIDOX_VERSION
def find_dot_files(dot_dir):
    """Returns a list of path to files ending with .dot in subdirs of `dot_dir`."""
    found = []
    for root, _dirs, files in os.walk(dot_dir):
        for name in files:
            if name.endswith('.dot'):
                found.append(os.path.join(root, name))
    return found
| 33.72043 | 196 | 0.627232 |
ace62a4e43bd169cdd80dc8bb9877280afc1f091 | 1,629 | py | Python | cdxjGenerator/cdxjGenerator.py | machawk1/cdxjGenerator | ddf857bad2e90a5bc36e1063b72142048e6296d2 | [
"MIT"
] | 1 | 2019-02-08T11:27:11.000Z | 2019-02-08T11:27:11.000Z | cdxjGenerator/cdxjGenerator.py | machawk1/cdxjGenerator | ddf857bad2e90a5bc36e1063b72142048e6296d2 | [
"MIT"
] | 15 | 2020-02-20T21:47:47.000Z | 2020-08-04T21:30:09.000Z | cdxjGenerator/cdxjGenerator.py | machawk1/cdxjGenerator | ddf857bad2e90a5bc36e1063b72142048e6296d2 | [
"MIT"
] | 1 | 2020-07-14T21:52:49.000Z | 2020-07-14T21:52:49.000Z | import datetime
from faker import Faker
import random
import string
import surt
import sys
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
    """Return a random identifier of `size` characters drawn from `chars`."""
    picked = []
    for _ in range(size):
        picked.append(random.choice(chars))
    return ''.join(picked)
def generate_line(provided_urir=None):
    """Infinite generator yielding fake CDXJ index lines.

    Each yielded line is "<SURT-ed URI> <14-digit datetime> <JSON blob>".
    If `provided_urir` is given it is reused for every line (only the
    timestamp and locators vary); otherwise a fresh fake URI is generated
    per line.
    """
    fake = Faker()
    # Widest possible datetime range accepted by datetime.date.
    start_date = datetime.date(year=1, month=1, day=1)
    end_date = datetime.date(year=9999, month=12, day=31)
    # Alphabet used for the fake IPFS content identifiers.
    ipfs_char_range = string.ascii_letters + string.digits
    while True:
        urir = provided_urir or fake.uri()
        # Canonicalize the URI into SURT form for the sortable index key.
        surted_urir = surt.surt(
            urir,
            path_strip_trailing_slash_unless_empty=True)
        # 14-digit timestamp (YYYYmmddHHMMSS) as used by Memento/CDXJ.
        date14 = fake.date_time_between_dates(
            start_date, end_date).strftime('%Y%m%d%H%M%S')
        # Two 46-character pseudo-IPFS hashes (header/payload locators).
        locators = (f"urn:ipfs/{id_generator(46, ipfs_char_range)}/"
                    f"{id_generator(46, ipfs_char_range)}")
        cdxj_line = (f"{surted_urir} {date14} "
                     "{"
                     f'"locator": "{locators}", '
                     f'"original_uri": "{urir}", '
                     '"mime_type": "text/html", "status_code": "200"}'
                     )
        yield cdxj_line
def main():
    """Print the CDXJ context header, then the requested number of lines.

    argv[1] (optional) is the line count (default 10); argv[2] (optional)
    is a fixed URI-R to use for every generated line.
    """
    print('!context ["http://tools.ietf.org/html/rfc7089"]')
    line_count = 10 if len(sys.argv) <= 1 else int(sys.argv[1])
    provided_urir = sys.argv[2] if len(sys.argv) == 3 else None
    lines = generate_line(provided_urir)
    for _ in range(line_count):
        print(next(lines))
if __name__ == "__main__":
    main()
| 26.274194 | 71 | 0.59423 |
ace62a80324e05c84b22e417d4d5e83b51fbeb65 | 1,639 | py | Python | pypj/main.py | edge-minato/pypj | c928cf9ba29017ed6c0756b24f91d75ae16473e4 | [
"MIT"
] | 13 | 2021-09-29T03:16:42.000Z | 2022-02-28T19:23:28.000Z | pypj/main.py | edge-minato/pypj | c928cf9ba29017ed6c0756b24f91d75ae16473e4 | [
"MIT"
] | 41 | 2021-09-03T09:49:49.000Z | 2022-03-20T20:46:02.000Z | pypj/main.py | edge-minato/pypj | c928cf9ba29017ed6c0756b24f91d75ae16473e4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
from pathlib import Path
from traceback import format_exc
from pypj.cui import ask_no_empty, ask_yN, confirm_proceed
from .args import args
from .const import ASCII_ART, GITHUB_URL
from .environment import Environment
from .exception import PypjError
from .file_path import PypjFilePath
from .setting import PackageName, PypjSetting
from .task import Poetry, TaskManager
def ask_package_name() -> PackageName:
    """Prompt until the user supplies a valid package name."""
    response = ask_no_empty("Package name: ")
    if PackageName.is_valid(response):
        return PackageName(response)
    # Invalid input: ask again.
    return ask_package_name()
def process() -> None:
    """Run the interactive project-creation flow end to end."""
    # preprocess: inspect the local toolchain and greet the user
    environment = Environment()
    print(ASCII_ART.format(python=environment.python, poetry=environment.poetry))
    # configure: build settings, optionally letting the user customize them
    project_setting = PypjSetting(python_version=environment.python, package_name=ask_package_name())
    if ask_yN("Do you want to customize settings?"):
        project_setting.customize()
    file_path = PypjFilePath(Path().cwd(), project_setting)
    # define tasks
    task_manager = TaskManager(project_setting, file_path)
    # execute once the user confirms
    confirm_proceed()
    Poetry(project_setting, file_path).execute()
    task_manager.execute()
def main() -> None:
    """CLI entry point: run the flow and translate failures into exit codes."""
    try:
        args()
        process()
    except PypjError as err:
        # Expected, user-facing failures.
        print(f"Error: {err}")
        sys.exit(1)
    except KeyboardInterrupt:
        # User aborted with Ctrl-C.
        print("\nCanceled.")
        sys.exit(1)
    except Exception:
        # Last-resort handler: show the traceback plus troubleshooting hints.
        print(format_exc())
        print()
        print("If you are behind a proxy, try to set following environmental variables.")
        print("http_proxy, https_proxy, HTTP_PROXY, HTTPS_PROXY")
        print(f"Else, please report the issue to {GITHUB_URL}")
        sys.exit(1)
ace62b40347bc721a1f648f0bb1ab4f4170fa7fb | 5,323 | py | Python | tests/dhcpv6/status_code/test_v6_status_code.py | isc-projects/forge | dfec8b41003d6b5a229f69ee93616e0e5cc6d71b | [
"0BSD"
] | 22 | 2015-02-27T11:51:05.000Z | 2022-02-28T12:39:29.000Z | tests/dhcpv6/status_code/test_v6_status_code.py | isc-projects/forge | dfec8b41003d6b5a229f69ee93616e0e5cc6d71b | [
"0BSD"
] | 16 | 2018-10-30T15:00:12.000Z | 2019-01-11T17:55:13.000Z | tests/dhcpv6/status_code/test_v6_status_code.py | isc-projects/forge | dfec8b41003d6b5a229f69ee93616e0e5cc6d71b | [
"0BSD"
] | 11 | 2015-02-27T11:51:36.000Z | 2021-03-30T08:33:54.000Z | """DHCPv6 Status Codes"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_msg
import srv_control
import references
import misc
@pytest.mark.v6
@pytest.mark.status_code
def test_v6_statuscode_noaddravail_solicit():
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::1')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
misc.test_procedure()
srv_msg.client_copy_option('IA_NA')
srv_msg.client_copy_option('server-id')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
misc.test_procedure()
srv_msg.generate_new('client')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 13)
srv_msg.response_check_suboption_content(13, 3, 'statuscode', 2)
references.references_check('RFC3315')
@pytest.mark.v6
@pytest.mark.status_code
@pytest.mark.request
def test_v6_statuscode_noaddravail_request():
    """REQUEST after pool exhaustion must get status code 2 (NoAddrsAvail)."""
    # Server offers a pool of exactly one address.
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::1')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    # First client performs SOLICIT...
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    # ...and completes REQUEST, consuming the only address in the pool.
    misc.test_procedure()
    srv_msg.client_save_option('IA_NA')
    srv_msg.client_save_option('server-id')
    srv_msg.client_requests_option(7)
    srv_msg.client_add_saved_option()
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    # A fresh client with a new IA sends REQUEST directly while the pool
    # is exhausted.
    misc.test_procedure()
    srv_msg.client_requests_option(7)
    srv_msg.generate_new('IA')
    srv_msg.generate_new('client')
    srv_msg.client_add_saved_option(erase=True)
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    # IA_NA (option 3) must carry a Status Code sub-option (13) with
    # status 2 = NoAddrsAvail.
    srv_msg.response_check_include_option(3)
    srv_msg.response_check_option_content(3, 'sub-option', 13)
    srv_msg.response_check_suboption_content(13, 3, 'statuscode', 2)
    references.references_check('RFC3315')
@pytest.mark.v6
@pytest.mark.status_code
@pytest.mark.renew
def test_v6_statuscode_nobinding_renew():
    """RENEW without a prior REQUEST (no binding) is answered per RFC 7550."""
    # when client id not known
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    # Client only solicits, so the server has no lease binding for it.
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    # Client then renews the advertised (never requested) address.
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(3)
    # changed after rfc7550
    # Response option 3 MUST contain sub-option 13.
    # Response sub-option 13 from option 3 MUST contain statuscode 3.
    srv_msg.response_check_option_content(3, 'sub-option', 5)
    srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::1')
    references.references_check('RFC3315')
@pytest.mark.v6
@pytest.mark.status_code
@pytest.mark.renew
@pytest.mark.disabled
def test_v6_statuscode_nobinding_renew_newIA():
    """RENEW with an unknown IAID (no binding); disabled pending RFC 7550 update."""
    # when client id not known
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    # Client only solicits, so the server has no lease binding for it.
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    # Client renews with a different IAID (66) than was advertised.
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.change_message_filed('iaid', 66, 'int')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(3)
    # changed after rfc7550
    # Response option 3 MUST contain sub-option 13.
    # Response sub-option 13 from option 3 MUST contain statuscode 3.
    references.references_check('RFC3315')
| 31.311765 | 69 | 0.736051 |
ace62b9d61d7c3335c1171842695fdb198f9f90d | 13,841 | py | Python | BioExp/clusters/concept.py | MiRL-IITM/BioExp | d121661bac7ae2d8c1bed7a52e9a0f550f446baa | [
"MIT"
] | null | null | null | BioExp/clusters/concept.py | MiRL-IITM/BioExp | d121661bac7ae2d8c1bed7a52e9a0f550f446baa | [
"MIT"
] | null | null | null | BioExp/clusters/concept.py | MiRL-IITM/BioExp | d121661bac7ae2d8c1bed7a52e9a0f550f446baa | [
"MIT"
] | null | null | null | import matplotlib
matplotlib.use('Agg')
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pdb
import os
import cv2
import keras
import random
import numpy as np
from glob import glob
import SimpleITK as sitk
import pandas as pd
from ..helpers.utils import *
from ..spatial.dissection import Dissector
from ..spatial.flow import singlelayercam
from keras.models import Model
from skimage.transform import resize as imresize
from keras.utils import np_utils
from keras import layers
from keras.models import Sequential
import keras.backend as tf
import matplotlib.gridspec as gridspec
from scipy.ndimage.measurements import label
from scipy.ndimage.morphology import binary_dilation, generate_binary_structure
class ConceptIdentification():
    """
    Network Dissection analysis

    model       : keras model initialized with trained weights
    layer_name  : intermediate layer name which needs to be analysed
    """

    def __init__(self, model, weights_pth, metric, nclasses=4):
        """
        model      : keras model
        weights_pth: path to trained weights (loaded immediately, by name)
        metric     : similarity metric kept for callers
        nclasses   : number of segmentation classes
        """
        self.model = model
        self.metric = metric
        self.weights = weights_pth
        self.nclasses = nclasses
        self.model.load_weights(self.weights, by_name = True)

    def _get_layer_idx(self, layer_name):
        """Return the index of the layer named `layer_name` (None if absent).

        NOTE: duplicate of `get_layer_idx`; both are kept so existing
        callers of either name keep working.
        """
        for idx, layer in enumerate(self.model.layers):
            if layer.name == layer_name:
                return idx

    def save_concepts(self, img, concepts, nrows, ncols, name, save_path=None):
        """
        creats a grid of image and saves if path is given

        img      : test image
        concepts : all feature maps, stacked along the last axis
        nrows    : number of rows in the grid
        ncols    : number of columns in the grid
        name     : file name to save under
        save_path: path to save the figure; if None, show it instead
        """
        plt.figure(figsize=(15, 15))
        gs = gridspec.GridSpec(nrows, ncols)
        gs.update(wspace=0.025, hspace=0.05)
        for i in range(nrows):
            for j in range(ncols):
                try:
                    # BUGFIX: row-major cell (i, j) maps to channel
                    # i * ncols + j; the original used i * nrows + j, which
                    # skips/repeats channels whenever nrows != ncols.
                    concept = concepts[:, :, i * ncols + j]
                    concept = np.ma.masked_where(concept == 0, concept)
                    ax = plt.subplot(gs[i, j])
                    im = ax.imshow(np.squeeze(img), cmap='gray')
                    im = ax.imshow(concept, alpha=0.5, cmap = plt.cm.RdBu, vmin = 0, vmax = 3)
                    ax.set_xticklabels([])
                    ax.set_yticklabels([])
                    ax.set_aspect('equal')
                    ax.tick_params(bottom='off', top='off', labelbottom='off' )
                except Exception:
                    # The grid may be larger than the number of channels;
                    # leave the remaining cells empty.
                    pass
        if save_path:
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            plt.savefig(os.path.join(save_path, name+'.png'), bbox_inches='tight')
        else:
            plt.show()

    def identify(self, concept_info,
                        dataset_path,
                        save_path,
                        loader,
                        test_img,
                        img_ROI = None):
        """
        test significance of each concepts

        concept_info: {'concept_name', 'layer_name', 'filter_idxs'}
        dataset_path: path used to estimate threshold maps
        save_path   : where figures/threshold maps are written (optional)
        loader      : data loader used by the Dissector
        test_img    : image to dissect
        img_ROI     : optional region-of-interest mask
        """
        layer_name = concept_info['layer_name']
        self.dissector = Dissector(self.model, layer_name)
        threshold_maps = self.dissector.get_threshold_maps(dataset_path, save_path, percentile = 85, loader=loader)
        concepts = self.dissector.apply_threshold(test_img, threshold_maps,
                                             post_process_threshold = 80,
                                             ROI=img_ROI)
        # Keep only the filters belonging to this concept.
        node_idxs = concept_info['filter_idxs']
        concepts = concepts[:, :, node_idxs]
        print (np.unique(concepts))
        print ("================")
        if save_path:
            # Near-square grid that fits every selected filter.
            nrows = int(len(node_idxs)**.5) + 1
            self.save_concepts(test_img, concepts, nrows, nrows, concept_info['concept_name'], save_path = save_path)
            # some statistics on concepts
            mean_concept = np.round(np.mean(concepts, axis=2)[:,:,None])
            self.save_concepts(test_img, mean_concept, 1, 1, concept_info['concept_name']+'mean', save_path = save_path)
        return concepts

    def get_layer_idx(self, layer_name):
        """Return the index of the layer named `layer_name` (None if absent)."""
        for idx, layer in enumerate(self.model.layers):
            if layer.name == layer_name:
                return idx

    def flow_based_identifier(self, concept_info,
                        save_path,
                        test_img,
                        test_gt):
        """
        test significance of each concepts via gradient flow (CAM)

        concept_info: {'concept_name', 'layer_name', 'filter_idxs'}
        save_path   : where the CAM figure is written (optional)
        test_img    : input image
        test_gt     : ground-truth segmentation

        Returns the class-0 gradient map produced by singlelayercam.
        """
        layer_name = concept_info['layer_name']
        node_idxs = concept_info['filter_idxs']
        # Restore pristine weights, then zero out every filter that is NOT
        # part of the concept so only the concept contributes to the flow.
        self.model.load_weights(self.weights, by_name = True)
        node_idx = self.get_layer_idx(concept_info['layer_name'])
        total_filters = np.arange(np.array(self.model.layers[node_idx].get_weights())[0].shape[-1])
        test_filters = np.delete(total_filters, node_idxs)
        layer_weights = np.array(self.model.layers[node_idx].get_weights().copy())
        occluded_weights = layer_weights.copy()
        for j in test_filters:
            occluded_weights[0][:,:,:,j] = 0
            try:
                occluded_weights[1][j] = 0
            except Exception:
                # Layer has no bias term.
                pass
        self.model.layers[node_idx].set_weights(occluded_weights)
        # Truncate the network at the concept layer and add a fixed 1x1 conv
        # (all-ones) that sums the remaining feature maps into one channel.
        model = Model(inputs = self.model.input, outputs=self.model.get_layer(concept_info['layer_name']).output)
        newmodel = Sequential()
        newmodel.add(model)
        newmodel.add(layers.Conv2D(1,1))
        newmodel.layers[-1].set_weights((np.ones((1, 1, len(total_filters), 1)), np.ones(1)))
        dice, information, grad = singlelayercam(newmodel, test_img, test_gt,
                            nclasses = 1,
                            save_path = save_path,
                            name = concept_info['concept_name'],
                            st_layer_idx = -1,
                            end_layer_idx = 1,
                            threshold = 0.5)
        print ("[BioExp:INFO Mean Layer Dice:] ", dice, information)
        return grad[0]

    def _gaussian_sampler_(self, data, size, ax=-1):
        """Return a 0-arg callable sampling N(mean(data, ax), std(data, ax))
        with `size` draws stacked on a trailing axis."""
        shape = np.mean(data, ax).shape + (size,)
        return lambda: np.std(data, -1)[..., None] * np.random.randn(*list(shape)) + np.mean(data, -1)[..., None]

    def concept_distribution(self, concept_info):
        """
        concept_info: {'concept_name', 'layer_name', 'filter_idxs'}

        return: (weight_sampler, bias_sampler) when the layer has biases,
                otherwise weight_sampler alone.
        """
        layer_name = concept_info['layer_name']
        node_idxs = concept_info['filter_idxs']
        self.model.load_weights(self.weights, by_name = True)
        node_idx = self.get_layer_idx(concept_info['layer_name'])
        layer_weights = np.array(self.model.layers[node_idx].get_weights().copy())
        concept_weights = layer_weights[0][:,:,:, node_idxs]
        try:
            concept_biases = layer_weights[1][node_idxs]
            # BUGFIX: was `self._gaussian_sampler` (missing trailing
            # underscore), a nonexistent attribute — the AttributeError was
            # swallowed below, so the bias sampler was never returned.
            return (self._gaussian_sampler_(concept_weights, len(node_idxs)), self._gaussian_sampler_(concept_biases, len(node_idxs)))
        except Exception:
            # No bias term in this layer.
            return (self._gaussian_sampler_(concept_weights, len(node_idxs)))

    def concept_robustness(self, concept_info,
                        test_img,
                        test_gt,
                        nmontecarlo=3):
        """
        Monte-Carlo robustness of a concept: resample the concept's weights
        from their empirical Gaussian and recompute the CAM each time.

        concept_info: {'concept_name', 'layer_name', 'filter_idxs'}
        test_img    : input image
        test_gt     : ground-truth segmentation
        nmontecarlo : number of resampling rounds

        Returns an array of nmontecarlo gradient maps.
        """
        layer_name = concept_info['layer_name']
        node_idxs = concept_info['filter_idxs']
        self.model.load_weights(self.weights, by_name = True)
        node_idx = self.get_layer_idx(concept_info['layer_name'])
        total_filters = np.arange(np.array(self.model.layers[node_idx].get_weights())[0].shape[-1])
        test_filters = np.delete(total_filters, node_idxs)
        layer_weights = np.array(self.model.layers[node_idx].get_weights().copy())
        occluded_weights = layer_weights.copy()
        for j in test_filters:
            occluded_weights[0][:,:,:,j] = 0
            try:
                occluded_weights[1][j] = 0
            except Exception:
                pass
        weight_sampler = self._gaussian_sampler_(occluded_weights[0][:,:,:,node_idxs], len(node_idxs))
        try:
            # BUGFIX: biases are 1-D; the original indexed them with a 4-D
            # slice ([:,:,:,node_idxs]), which always raised and left
            # bias_sampler undefined, so biases were never resampled.
            bias_sampler = self._gaussian_sampler_(occluded_weights[1][node_idxs], len(node_idxs))
        except Exception:
            # Layer has no bias term.
            pass
        print (weight_sampler().shape, occluded_weights[0][:,:,:,node_idxs].shape)
        gradlist = []
        for _ in range(nmontecarlo):
            occluded_weights[0][:,:,:,node_idxs] = weight_sampler()
            try:
                occluded_weights[1][node_idxs] = bias_sampler()
            except Exception:
                # bias_sampler undefined (no bias term).
                pass
            self.model.layers[node_idx].set_weights(occluded_weights)
            model = Model(inputs = self.model.input, outputs=self.model.get_layer(concept_info['layer_name']).output)
            newmodel = Sequential()
            newmodel.add(model)
            newmodel.add(layers.Conv2D(1,1))
            newmodel.layers[-1].set_weights((np.ones((1, 1, len(total_filters), 1)), np.ones(1)))
            dice, information,nclass_grad = singlelayercam(newmodel, test_img, test_gt,
                                nclasses = 1,
                                name = concept_info['concept_name'],
                                st_layer_idx = -1,
                                end_layer_idx = 1,
                                threshold = 0.5)
            gradlist.append(nclass_grad[0])
        try:
            del bias_sampler
        except Exception:
            pass
        return np.array(gradlist)

    def check_robustness(self, concept_info,
                            save_path,
                            test_img,
                            test_gt,
                            save_all = False,
                            nmontecarlo = 4):
        """Compare the actual concept CAM against Monte-Carlo resampled CAMs.

        save_all: if True, plot every sampled map; otherwise only the mean.
        """
        actual_grad = self.flow_based_identifier(concept_info,
                            save_path = None,
                            test_img = test_img,
                            test_gt = test_gt)
        montecarlo_grad = self.concept_robustness(concept_info,
                                test_img,
                                test_gt,
                                nmontecarlo=nmontecarlo)
        if save_path:
            plt.clf()
            if save_all:
                # One panel for the actual map, one per Monte-Carlo sample.
                plt.figure(figsize=(10*(nmontecarlo + 1), 10))
                gs = gridspec.GridSpec(1, nmontecarlo + 1)
                gs.update(wspace=0.025, hspace=0.05)
                ax = plt.subplot(gs[0])
                im = ax.imshow(actual_grad, cmap=plt.get_cmap('hot'), vmin=0, vmax=1)
                ax.set_xticklabels([])
                ax.set_yticklabels([])
                ax.set_aspect('equal')
                ax.set_title('actual')
                ax.tick_params(bottom='off', top='off', labelbottom='off' )
                for ii in range(nmontecarlo):
                    ax = plt.subplot(gs[ii + 1])
                    im = ax.imshow(montecarlo_grad[ii], cmap=plt.get_cmap('hot'), vmin=0, vmax=1)
                    ax.set_xticklabels([])
                    ax.set_yticklabels([])
                    ax.set_aspect('equal')
                    ax.set_title('sampled')
                    ax.tick_params(bottom='off', top='off', labelbottom='off')
            else:
                # Two panels: actual map and the mean of the sampled maps.
                plt.figure(figsize=(10*(2), 10))
                gs = gridspec.GridSpec(1, 2)
                gs.update(wspace=0.025, hspace=0.05)
                ax = plt.subplot(gs[0])
                im = ax.imshow(actual_grad, cmap=plt.get_cmap('hot'), vmin=0, vmax=1)
                ax.set_xticklabels([])
                ax.set_yticklabels([])
                ax.set_aspect('equal')
                ax.set_title('actual')
                ax.tick_params(bottom='off', top='off', labelbottom='off' )
                ax = plt.subplot(gs[1])
                im = ax.imshow(np.mean(montecarlo_grad, axis=0), cmap=plt.get_cmap('hot'), vmin=0, vmax=1)
                ax.set_xticklabels([])
                ax.set_yticklabels([])
                ax.set_aspect('equal')
                ax.set_title('actual')
                ax.tick_params(bottom='off', top='off', labelbottom='off' )
            divider = make_axes_locatable(ax)
            cax = divider.append_axes("right", size="5%", pad=0.2)
            cb = plt.colorbar(im, ax=ax, cax=cax )
            os.makedirs(save_path, exist_ok = True)
            plt.savefig(os.path.join(save_path, concept_info['concept_name'] +'.png'), bbox_inches='tight')
| 38.234807 | 200 | 0.54353 |
ace62bfcc8a5af97eb49510888bec22ec212bcb6 | 12,707 | py | Python | src/04_build_model.py | yaz-saleh/DSCI_522_group_31 | 7de74fbeb578842624726b810e6d2efd88e556a4 | [
"MIT"
] | null | null | null | src/04_build_model.py | yaz-saleh/DSCI_522_group_31 | 7de74fbeb578842624726b810e6d2efd88e556a4 | [
"MIT"
] | null | null | null | src/04_build_model.py | yaz-saleh/DSCI_522_group_31 | 7de74fbeb578842624726b810e6d2efd88e556a4 | [
"MIT"
] | null | null | null | # author: Mai Le
# date: 2020-11-27
"""Reads the data from the data clean-up script, performs some statistical or
machine learning analysis and summarizes the results as a figure(s) and
a table(s).
Usage: src/04_build_model.py --data_path=<data_path> --out_report_path=<out_report_path> [--random_state=<random_state>] [--tune_params=<tune_params>]
Options:
--data_path=<data_path> The path containing train & test dataset
--out_report_path=<out_report_path> The path to export model scores in figures and tables
--random_state=<random_state> The random state that we want to use for splitting. [default: 2020]
--tune_params=<tune_params> Whether we need to tune hyperparameters or not [default: True]
"""
# region import libraries
from sklearn.compose import (
make_column_transformer,
)
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.model_selection import (
RandomizedSearchCV,
cross_validate,
)
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.svm import SVC
from imblearn.over_sampling import SMOTE
from imblearn.pipeline import make_pipeline as make_imb_pipeline
import scipy
from scipy.stats import loguniform
import pickle
from sklearn.feature_selection import RFECV
from docopt import docopt
import pandas as pd
import numpy as np
import utils
import shopping_data_reader as sdr
# endregion
# Shared dictionary key under which each model spec stores its
# hyperparameter search space (see the create_*_model helpers).
PARAM_DIST = "param_dist"
# region main function
def main(data_path, out_report_path, random_state, tune_params):
    """Train, select, evaluate and persist the best classifier.

    Args:
        data_path (str): directory holding the train/test data; model
            artifacts (CSV summary, pickled model) are also written here.
        out_report_path (str): directory for figures and tables.
        random_state (str or int): seed used throughout.
        tune_params (str): "True" to run hyperparameter search; any other
            value reuses the predefined best values.
    """
    # read the data files, split into X and y
    print("Start build_model script")
    random_state = int(random_state)
    # docopt hands the flag over as a string; compare directly instead of
    # the redundant `True if ... else False`.
    tune = tune_params == 'True'
    if tune:
        print("We will tune the hyperparameters")
    else:
        print("We will use the predefined hyperamater values")
    print("Read the data files, split into X and y")
    X_train, y_train, X_test, y_test = sdr.read_data_as_xy(data_path)
    preprocessor = make_column_transformer(
        ("drop", sdr.drop_features),
        (StandardScaler(), sdr.numerical_features),
        (OneHotEncoder(handle_unknown="ignore"), sdr.categorical_features),
        (OneHotEncoder(handle_unknown="error", drop="if_binary"),
         sdr.binary_features),
    )
    # tuning hyperparameters for SVC, RandomForestClassifier and
    # LogisticRegression
    print("Process models")
    hyperparams_best_model = tune_hyperparams(
        preprocessor, X_train, y_train, random_state, tune
    )
    # find the best model
    print("Finding the best model using cross validation")
    _, best_model, _ = find_best_model(
        hyperparams_best_model,
        X_train,
        y_train,
        random_state,
        data_path + "/model_selection_result.csv",
    )
    # get result plots
    print("Creating plots and classification report")
    plot, class_report = utils.plot_results(
        best_model, X_test, y_test, ["No-revenue", "Revenue"]
    )
    # save plots to report path
    print("Saving reports")
    utils.save_plots(
        out_report_path,
        plot,
        class_report,
        ("confusion_matrix", "classification_report"),
    )
    # save model to disk
    print("Saving the best model for later use")
    # BUGFIX: the original passed an unclosed `open(...)` handle to
    # pickle.dump; use a context manager so the file is flushed and closed.
    with open(data_path + "/best_model.sav", "wb") as model_file:
        pickle.dump(best_model, model_file)
    # try feature selection
    print("Building the model with RFE")
    fs_model = make_pipeline(
        preprocessor,
        RFECV(Ridge(random_state=random_state), cv=10),
        RandomForestClassifier(max_depth=12, n_estimators=275),
    )
    fs_model.fit(X_train, y_train)
    plot, class_report = utils.plot_results(
        fs_model, X_test, y_test, ["No-revenue", "Revenue"]
    )
    # save plots to report path
    print("Saving reports")
    utils.save_plots(
        out_report_path,
        plot,
        class_report,
        (
            "confusion_matrix_feature_selection",
            "classification_report_feature_selection",
        ),
    )
    print("End build_model script")
    return
def create_logistic_regression_model(
    random_state, tune=True, class_balanced=True
):
    """Build a logistic regression spec: estimator plus hyperparameter space.

    Parameters
    ----------
    random_state : int
        seed forwarded to the estimator
    tune : bool, optional
        search the hyperparameter space, or pin the known-best value,
        by default True
    class_balanced : bool, optional
        use class_weight="balanced" (True) or leave balancing to SMOTE (False)

    Returns
    -------
    dict
        {"clf": estimator, PARAM_DIST: search space}
    """
    classifier = LogisticRegression(
        class_weight="balanced" if class_balanced else None,
        random_state=random_state,
        max_iter=1000
    )
    if tune:
        param_dist = {"logisticregression__C": loguniform(1e-3, 1e3)}
    elif class_balanced:
        # best value found by a previous randomized search
        param_dist = {"logisticregression__C": [0.008713608033492446]}
    else:
        param_dist = {"logisticregression__C": [0.008713608033492446]}
    return {"clf": classifier, PARAM_DIST: param_dist}
def create_random_forest_model(
    random_state, tune=True, class_balanced=True
):
    """Build a random forest spec: estimator plus hyperparameter space.

    Parameters
    ----------
    random_state : int
        seed forwarded to the estimator
    tune : bool, optional
        search the hyperparameter space, or pin the known-best values,
        by default True
    class_balanced : bool, optional
        use class_weight="balanced" (True) or leave balancing to SMOTE (False)

    Returns
    -------
    dict
        {"clf": estimator, PARAM_DIST: search space}
    """
    classifier = RandomForestClassifier(
        class_weight="balanced" if class_balanced else None,
        random_state=random_state
    )
    if tune:
        param_dist = {
            "randomforestclassifier__n_estimators":
                scipy.stats.randint(low=10, high=300),
            "randomforestclassifier__max_depth":
                scipy.stats.randint(low=2, high=20)
        }
    elif class_balanced:
        # best values found by a previous randomized search
        param_dist = {
            "randomforestclassifier__n_estimators": [65],
            "randomforestclassifier__max_depth": [12],
        }
    else:
        param_dist = {
            "randomforestclassifier__n_estimators": [170],
            "randomforestclassifier__max_depth": [18],
        }
    return {"clf": classifier, PARAM_DIST: param_dist}
def create_SVC_model(
    random_state, tune=True, class_balanced=True
):
    """Build an SVC spec: estimator plus hyperparameter space.

    Parameters
    ----------
    random_state : int
        seed forwarded to the estimator
    tune : bool, optional
        search the hyperparameter space, or pin the known-best values,
        by default True
    class_balanced : bool, optional
        use class_weight="balanced" (True) or leave balancing to SMOTE (False)

    Returns
    -------
    dict
        {"clf": estimator, PARAM_DIST: search space}
    """
    classifier = SVC(
        class_weight="balanced" if class_balanced else None,
        random_state=random_state
    )
    if tune:
        param_dist = {
            "svc__gamma": [0.1, 1.0, 10, 100],
            "svc__C": [0.1, 1.0, 10, 100],
        }
    elif class_balanced:
        # best values found by a previous randomized search
        param_dist = {"svc__gamma": [0.1], "svc__C": [1.0]}
    else:
        param_dist = {"svc__gamma": [0.1], "svc__C": [1.0]}
    return {"clf": classifier, PARAM_DIST: param_dist}
def tune_hyperparams(preprocessor, X, y, random_state, tune=True):
    """tuning hyperparameters for LogisticRegression, RandomForestClassifier
    and SVC with preprocessor on X, y with random_state using
    RandomizedSearchCV

    Args:
        preprocessor (Pipeline/ColumnTransformer): a Pipeline to transform X
        X (DataFrame): features
        y (DataFrame): target
        random_state (integer): random state
        tune (bool): search the hyperparameter space when True; when False
            only evaluate the predefined best values (n_iter=1)

    Returns:
        dict: a dictionary with key=model's name,
        value={"best_model":best_model, "best_params":best_params}

    Examples:
        hyperparams_best_model = tune_hyperparams(preprocessor, X_train,
        y_train, 2020)
    """
    # Each entry pairs a human-readable model name with a spec produced by
    # the create_*_model helpers; "Balanced" uses class weights, "SMOTE"
    # oversamples instead.
    classifiers = {
        "Logistic Regression Balanced":
            create_logistic_regression_model(random_state, tune),
        "Logistic Regression SMOTE":
            create_logistic_regression_model(random_state, tune, False),
        "Random Forest Balanced":
            create_random_forest_model(random_state, tune),
        "Random Forest SMOTE":
            create_random_forest_model(random_state, tune, False),
        "SVC Balanced":
            create_SVC_model(random_state, tune),
        "SVC SMOTE":
            create_SVC_model(random_state, tune, False)
    }
    hyperparams_best_model = {}
    # A single iteration suffices when the search space is the pinned values.
    n_iter = 10 if tune else 1
    # find the best hyperparameters of each model
    for name, model_dict in classifiers.items():
        print("Processing", name)
        if "SMOTE" in name:
            # imblearn pipeline so SMOTE resampling happens inside each fold.
            pipe = make_imb_pipeline(preprocessor, SMOTE(), model_dict["clf"])
        else:
            pipe = make_pipeline(preprocessor, model_dict["clf"])
        random_search = RandomizedSearchCV(
            pipe,
            param_distributions=model_dict[PARAM_DIST],
            n_iter=n_iter,
            verbose=1,
            n_jobs=-1,
            scoring="f1",
            cv=10,
            random_state=random_state,
        )
        random_search.fit(X, y)
        hyperparams_best_model[name] = {
            "best_model": random_search.best_estimator_,
            "best_params": random_search.best_params_,
        }
    # print(hyperparams_best_model)
    return hyperparams_best_model
def find_best_model(
    hyperparams_best_model,
    X,
    y,
    random_state,
    path="../data/processed/model_selection_result.csv"
):
    """Find the best model among the tuned candidates via cross validation.

    Every candidate's accuracy/recall/f1 is measured with 10-fold CV, the
    summary table is printed and written to *path* as CSV, and the model
    with the highest mean test f1 is returned.

    Args:
        hyperparams_best_model (dict): a dictionary with key=model's name,
            value={"best_model": best_model, "best_params": best_params}
        X (DataFrame): features
        y (DataFrame): target
        random_state (int): random_state (kept for interface compatibility)
        path (str): where the cross-validation summary CSV is written

    Returns:
        (string, Pipeline, list): a tuple consisting of best model's name,
        best model object and its best hyperparameters

    Examples:
        best_model_name, best_model, best_params =
        find_best_model(hyperparams_best_model, X_train, y_train, 2020)
    """
    metrics = ["accuracy", "recall", "f1"]
    # Keys under which utils.store_cross_val_results stores each score.
    score_keys = [
        "fit_time",
        "score_time",
        "test_accuracy",
        "test_f1",
        "test_recall",
    ]
    results = {}
    for model_name, candidate in hyperparams_best_model.items():
        fold_scores = cross_validate(
            candidate["best_model"], X, y, cv=10, n_jobs=-1, scoring=metrics
        )
        utils.store_cross_val_results(score_keys, model_name, fold_scores, results)
    summary = pd.DataFrame(results).T
    print(summary)
    summary.reset_index().to_csv(path)
    # Pick the candidate with the highest mean test f1.
    winner_name = summary.iloc[np.argmax(summary["test_f1"])].name
    winner = hyperparams_best_model[winner_name]["best_model"]
    winner_params = hyperparams_best_model[winner_name]["best_params"]
    return (winner_name, winner, winner_params)
def retrieve_important_features(
    preprocessor,
    X_train,
    y_train,
    numerical_features,
    categorical_features,
    binary_features,
    random_state
):
    """return a list of features that are important in predicting the target
    using RFE
    Args:
        preprocessor (ColumnTransformer): feature transformation
        X_train (DataFrame): train feature set
        y_train (DataFrame): train target
        numerical_features (list): names of numeric columns (passed through)
        categorical_features (list): names of one-hot-encoded columns
        binary_features (list): names of binary-encoded columns
        random_state (int): random_state
    Returns:
        [list]: list of important features
    Examples:
        retrieve_important_features(preprocessor, X_train, y_train, 2020)
    """
    # RFECV with a Ridge regressor ranks the transformed columns and keeps
    # only those whose elimination hurts cross-validated performance.
    rfecv = make_pipeline(
        preprocessor, RFECV(Ridge(random_state=random_state), cv=10)
    )
    rfecv.fit(X_train, y_train)
    # NOTE(review): the indices 2 and 3 into transformers_ assume a fixed
    # transformer order inside the preprocessor (categorical encoder third,
    # binary encoder fourth) -- confirm against the preprocessor definition.
    ohe_columns = list(
        rfecv.named_steps["columntransformer"]
        .transformers_[2][1]
        .get_feature_names(categorical_features)
    )
    bin_columns = list(
        rfecv.named_steps["columntransformer"]
        .transformers_[3][1]
        .get_feature_names(binary_features)
    )
    # Column order must match the order the ColumnTransformer emits them in.
    new_columns = numerical_features + ohe_columns + bin_columns
    new_columns = np.array(new_columns)
    # support_ is the boolean mask of columns RFECV decided to keep.
    return new_columns[rfecv.named_steps["rfecv"].support_]
# Command-line entry point: options are parsed by docopt from the module
# docstring and forwarded to main() (defined earlier in this file).
if __name__ == "__main__":
    opt = docopt(__doc__)
    main(
        opt["--data_path"],
        opt["--out_report_path"],
        opt["--random_state"],
        opt["--tune_params"],
    )
| 28.427293 | 150 | 0.640828 |
ace62c822cccc06e79b5a952184a37906a43b829 | 12,945 | py | Python | AudioBeamformer/Modules/Beamsteering.py | BA-OST-2022/audio-beamformer-software | 3052a74b01676701ece403b06f637f6cc4b4a5d9 | [
"MIT"
] | null | null | null | AudioBeamformer/Modules/Beamsteering.py | BA-OST-2022/audio-beamformer-software | 3052a74b01676701ece403b06f637f6cc4b4a5d9 | [
"MIT"
] | null | null | null | AudioBeamformer/Modules/Beamsteering.py | BA-OST-2022/audio-beamformer-software | 3052a74b01676701ece403b06f637f6cc4b4a5d9 | [
"MIT"
] | null | null | null | ###############################################################################
# file AudioProcessing.py
###############################################################################
# brief This module handels the beamsteering for the audio-beamformer
###############################################################################
# author Florian Baumgartner & Thierry Schwaller
# version 1.0
# date 2022-05-04
###############################################################################
# MIT License
#
# Copyright (c) 2022 ICAI Interdisciplinary Center for Artificial Intelligence
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
import os
import ast
import threading
import numpy as np
import time
from pathlib import Path
from Plotter import WindowPlotter
# When True, Beamsteering.calculateGains() prints the computed gains instead
# of writing them to the FPGA controller.
DEBUG = False
class Beamsteering():
    """Beam steering/focusing controller for the speaker-array audio beamformer.

    Periodically (via a threading.Timer loop) determines the steering angle
    from one of three sources (camera face tracking, manual, pattern sweep),
    converts it into per-channel delays for the FPGA, applies an amplitude
    window across the 19 channels, and mirrors the state on the LED bar.
    """
    def __init__(self,
                 sensors = None,
                 facetracking = None,
                 fpgaControl = None,
                 leds=None):
        # Module init
        self._fpga_controller = fpgaControl
        self._sensors = sensors
        self._facetracking = facetracking
        self._leds = leds
        # Constants
        self.__distance = 0.01475       # channel (speaker row) spacing in meters
        self.__row_count = 19           # number of speaker rows / LED channels
        # LED
        self._COLOR_GRAD_PEAK = np.array([1.00, 0.40, 0.00])
        self._COLOR_GRAD_LOW = np.array([0.05, 0.20, 0.95])
        self._COLOR_DEAFULT = np.array([0.50, 0.87, 0.92])
        # Camera angle in degree
        self.__max_angle_camera = 40
        # Var init
        self._initialized = False
        self._runThread = False
        self._updateRate = 2            # delay recalculation rate [Hz]
        self._ledsUpdateRate = 12       # LED refresh rate [Hz]
        self._timeTemp = 0
        # Beamsteering
        self._beamsteeringEnable = False
        self._beamsteeringSources = {0: "Camera", 1: "Manual", 2: "Pattern"}
        self._currSteerSource = 0
        self._angleToSteer = 0
        self._angleToSteer_faceTracking = 0
        self._angleToSteer_manual = 0
        self._beamfocusing_enable = False
        self.__beamfocusing_radius = 3 # Beamfocusing radius is set to three
        # Pattern: each line of the file is a tuple (name, min, max, steps, time)
        # parsed with ast.literal_eval.
        self._beamsteeringPattern = {}
        self.__pattern_dict_path = os.path.dirname(os.path.realpath(__file__)) + "/Files/beamsteering_pattern.txt"
        with open(self.__pattern_dict_path, encoding="utf-8") as f:
            for line in f.readlines():
                line_tupel = ast.literal_eval(line)
                self._beamsteeringPattern[line_tupel[0]] = line_tupel[1:]
        self.setBeamsteeringPattern(0)
        self._currentPattern = 0
        self._PatternHoldTime = 1
        # Window: all window tap vectors are computed eagerly here, once.
        self.__window_types = {"Rectangle": self.rectWindow(),
                               "Cosine": self.cosineWindow(),
                               "Hann": self.hannWindow(),
                               "Hamming": self.hammingWindow(),
                               "Blackman": self.blackmanWindow(),
                               "Dolph-Chebyshev": self.chebyWindow()}
        self._activeWindow = "Rectangle"
        self._enableChannel = np.ones(self.__row_count)
        self._gains = np.ones(self.__row_count)
        self._plotter = WindowPlotter(250, int(250 * 0.517))
        self.generatePlot()
    def begin(self):
        """Start the periodic update loop (idempotent)."""
        if not self._initialized:
            self._initialized = True
            self._runThread = True
            self.update()
    def end(self):
        """Stop the periodic update loop after the current iteration."""
        self._runThread = False
    def update(self):
        """Self-rescheduling update tick: refresh angle and LEDs at the LED
        rate, recompute FPGA delays at the (slower) angle update rate."""
        if(self._initialized):
            if(self._runThread):
                # Update rate for the LEDS
                threading.Timer(1.0 / self._ledsUpdateRate, self.update).start()
            else:
                return
            self.setAngle()
            self.setLEDS()
            # Update rate for the angle
            if(time.time() - self._timeTemp > 1 / self._updateRate):
                self._timeTemp = time.time()
                if(self._beamsteeringEnable or self._beamfocusing_enable):
                    self.calculateDelay()
    def generatePlot(self):
        """Render one SVG preview per window type for the GUI."""
        for i,elem in enumerate(self.__window_types.keys()):
            path = Path(os.path.dirname(__file__)).parents[0] / f"GUI/qml/images/window_{i}.svg"
            taps = self.__window_types[elem]
            self._plotter.generatePlot(taps,path)
    def enableBeamsteering(self,value):
        """Enable/disable beamsteering; disabling re-centers the beam."""
        self._beamsteeringEnable = value
        # If beamsteering is being turned off, set angle to zero and calculate delay
        if value==0:
            self._angleToSteer = 0
            self.calculateDelay()
    def setBeamsteeringSource(self, source):
        # Index into self._beamsteeringSources: 0=Camera, 1=Manual, 2=Pattern.
        self._currSteerSource = source
    def enableBeamfocusing(self,enable):
        self._beamfocusing_enable = enable
    def setBeamfocusingRadius(self,radius):
        # Focal distance in meters used by calculateDelay().
        self.__beamfocusing_radius = radius
    def setBeamsteeringAngle(self, angle):
        # Manual steering angle in degrees (used when source == Manual).
        self._angleToSteer_manual = angle
    def setBeamsteeringPattern(self, pattern):
        """Activate the pattern at index *pattern* of the loaded pattern file."""
        name = list(self._beamsteeringPattern.keys())[pattern]
        # NOTE(review): local name `time` shadows the time module inside this method.
        min_angle, max_angle, steps, time = self._beamsteeringPattern[name]
        self._activePattern = np.linspace(min_angle,max_angle, steps)
        self._PatternHoldTime = time
    def getBeamsteeringPattern(self):
        """Return the names of all available steering patterns."""
        return list(self._beamsteeringPattern.keys())
    def setChannelEnable(self,list):
        """Enable/disable individual channels (list of 0/1 per row)."""
        self._enableChannel = np.array(list)
        self._fpga_controller.enableChannels(list)
        self._fpga_controller.update()
    def _calc_angle_face(self):
        """Convert the tracked face's pixel x-position into a steering angle
        in degrees (0 when no tracker or no face is available)."""
        # TODO Measure camera angle
        max_image_size_x = 680
        angle = 0
        # If facetracking found
        if self._facetracking:
            # Get position from facetracking
            position = self._facetracking.getFocusLocation()
            # If position avaiable
            if len(position) > 1:
                # Zero at center
                x_pos = max_image_size_x/2-position[0]
                # Calculate angle
                distance = max_image_size_x / (2*np.tan(self.__max_angle_camera/ 180 * np.pi))
                angle = np.arctan(x_pos / distance)* 180 / np.pi
        return angle
    def setLEDS(self):
        """Paint the LED bar: a color gradient peaking at the current steering
        direction, dimmed by the window gains and the channel-enable mask."""
        #TODO Check if works with self.__max_angle_camera
        min_angle = -45
        max_angle = 45
        # Where should the peak be
        peak = self._angleToSteer / (max_angle - min_angle) * self.__row_count + self.__row_count // 2
        # Calc difference vector and scale
        color_gradient = (self._COLOR_GRAD_LOW - self._COLOR_GRAD_PEAK)/ (np.ceil(np.abs(peak - self.__row_count // 2)) + self.__row_count // 2)
        leds_display = np.ones((self.__row_count,3))
        if self._beamsteeringEnable:
            for i,elem in enumerate(leds_display):
                distance = np.abs(i - peak)
                leds_display[i,:] = self._COLOR_GRAD_PEAK + distance * color_gradient
            leds_display = np.abs(leds_display)
        else:
            leds_display *= self._COLOR_DEAFULT
        # Window Brightness Overlay
        leds_display *= np.abs(np.column_stack((self._gains,self._gains, self._gains)))
        # Channel Enable Overlay
        leds_display *= np.column_stack((self._enableChannel,self._enableChannel, self._enableChannel))
        self._leds.setChannelColors(leds_display)
        if self._currSteerSource != 0: # Turn off LEDs if not in camera mode
            self._leds.setCameraAnimation(self._leds.OFF)
        else:
            if self._facetracking.getDetectionCount() == 0:
                self._leds.setCameraAnimation(self._leds.SEARCHING)
            else:
                self._leds.setCameraAnimation(self._leds.TRACKING)
    def setAngle(self):
        """Refresh self._angleToSteer from the currently selected source."""
        # Face Tracking
        if (self._currSteerSource == 0):
            self._angleToSteer = self._calc_angle_face()
        # Manual
        elif (self._currSteerSource == 1):
            self._angleToSteer = self._angleToSteer_manual
        # Pattern: step through the active pattern, holding each angle
        # for _PatternHoldTime seconds.
        else:
            self._angleToSteer = self._activePattern[int(time.time()/self._PatternHoldTime % len(self._activePattern))]
    def calculateDelay(self):
        """Compute per-channel delays for the current angle (plus optional
        focusing term), clamp them to the FPGA limit and push them out."""
        # Check if delay allowed
        maxDelay = self._fpga_controller.getMaxChannelDelay()
        # If angle below 1 degree set delay to zero
        if abs(self._angleToSteer) >= 1 and self._beamsteeringEnable:
            # Linear delay ramp across the array: d_i = i * spacing/c * sin(angle)
            delay = np.arange(self.__row_count) * (self.__distance / self.getSpeedOfSound()) * np.sin(self._angleToSteer/180*np.pi)
            # Make all delays positive
            if (np.sin(self._angleToSteer/180*np.pi) < 0):
                delay = delay[::-1] * -1
        else:
            delay = np.zeros(self.__row_count)
        if self._beamfocusing_enable:
            # Parabolic delay profile focusing at __beamfocusing_radius meters;
            # only applied when it stays within the FPGA's delay limit.
            focus_delay = self.__distance**2/(2*self.__beamfocusing_radius*self.getSpeedOfSound()) * np.arange(self.__row_count) * (self.__row_count - np.arange(self.__row_count) - 1)
            tot_delay = delay + focus_delay
            tot_delay -= min(tot_delay)
            if not np.any(np.max(tot_delay) >= maxDelay):
                delay = tot_delay
            else:
                print(f"Beamfocusing was not applied")
        print(delay)
        if np.any(delay >= maxDelay):
            print(f"Wrong angle: {delay}")
            delay = np.clip(delay, 0, maxDelay)
        self._fpga_controller.setChannelDelay(delay)
        self._fpga_controller.update()
    def calculateGains(self):
        """Apply the active window's tap values as per-channel gains."""
        self._gains = np.array(self.__window_types[self._activeWindow])
        if not DEBUG:
            self._fpga_controller.setChannelGain(self._gains)
            self._fpga_controller.update()
        else:
            print(f"Gains: {np.array(self._gains)}")
    def getSpeedOfSound(self):
        """Speed of sound [m/s] from ambient temperature; 343.3 as fallback."""
        if self._sensors:
            temp = self._sensors.getTemperature(self._sensors.SRC_AMBIENT)
            if not np.isnan(temp):
                return 331.5 + 0.607 * temp
        return 343.3
    def setWindowProfile(self, profile):
        # NOTE(review): self.__window_list is only created in
        # getWindowProfileList() -- this assumes that getter was called first.
        self._activeWindow = self.__window_list[profile]
        self.calculateGains()
    def getWindowProfileList(self):
        self.__window_list = list(self.__window_types.keys())
        return list(self.__window_types.keys())
    # Window types: each returns the amplitude taps (max-normalized) for the
    # 19-channel array.
    def rectWindow(self):
        gains = [1] * self.__row_count
        return gains
    def cosineWindow(self):
        gains = np.sin(np.arange(self.__row_count)*np.pi/(self.__row_count-1))
        gains /= max(gains)
        return gains
    def hannWindow(self):
        gains = np.sin(np.arange(self.__row_count)*np.pi/(self.__row_count-1))**2
        gains /= max(gains)
        return gains
    def hammingWindow(self):
        gains = 0.54 - 0.46 * np.cos(2*np.arange(self.__row_count)*np.pi/(self.__row_count-1))
        gains /= max(gains)
        return gains
    def blackmanWindow(self):
        gains = 0.42 - 0.5 * np.cos(2*np.arange(self.__row_count)*np.pi/(self.__row_count-1)) + 0.08 * np.cos(4*np.arange(self.__row_count)*np.pi/(self.__row_count-1))
        gains /= max(gains)
        return gains
    def chebyWindow(self):
        # Dolph-Chebyshev window with alpha = 5 (100 dB sidelobe spec),
        # built in the frequency domain and transformed back via IFFT.
        alpha = 5
        beta = np.cosh(1/self.__row_count*np.arccosh(10**alpha))
        freq_dom = np.array([self.chebyPol(beta*np.cos(np.pi * val /(self.__row_count+1)))/self.chebyPol(beta) for val in np.arange(self.__row_count)])
        gains = np.real(np.fft.fftshift(np.fft.ifft(freq_dom)))
        gains /= max(gains)
        return gains
    def chebyPol(self, val):
        """Chebyshev polynomial T_N(val) for N = row count, valid on all reals."""
        N = self.__row_count
        if val <= -1:
            return (-1)**N*np.cosh(N*np.arccosh(-val))
        elif val >= 1:
            return np.cosh(N*np.arccosh(val))
        else:
            return np.cos(N*np.arccos(val))
# Ad-hoc smoke entry point: constructs the controller with all hardware
# dependencies left at None (NOTE(review): __init__ reads the pattern file
# and renders window plots, so this requires the module's data files).
if __name__ == '__main__':
    beamsteering = Beamsteering()
| 39.346505 | 183 | 0.597837 |
ace62cbd3007d378ae4e8db710679941ff5b8d1c | 305 | py | Python | django/core/urls.py | devm1023/angular-Django | a5fcc223467e816b74b4f17a9f1e149c93e9b786 | [
"MIT"
] | null | null | null | django/core/urls.py | devm1023/angular-Django | a5fcc223467e816b74b4f17a9f1e149c93e9b786 | [
"MIT"
] | null | null | null | django/core/urls.py | devm1023/angular-Django | a5fcc223467e816b74b4f17a9f1e149c93e9b786 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from .views import check_logged_in, do_login, do_logout
# URL routes for the auth endpoints; each `name` is used for reverse() lookups.
urlpatterns = [
    url(regex=r'^checkloggedin$', view=check_logged_in, name='check_logged_in'),
    url(regex=r'^login$', view=do_login, name='login'),
    url(regex=r'^logout$', view=do_logout, name='logout'),
]
| 30.5 | 80 | 0.714754 |
ace62d41fab943352827cac0d9c30edcad4f2335 | 947 | py | Python | examples/python_app.py | vamshi091211/pyinfra | 6e14b039422e00ebc68110eabbc6a3a543c96279 | [
"MIT"
] | 1 | 2022-03-24T05:44:45.000Z | 2022-03-24T05:44:45.000Z | examples/python_app.py | marinakravchenko21/pyinfra | 6e14b039422e00ebc68110eabbc6a3a543c96279 | [
"MIT"
] | null | null | null | examples/python_app.py | marinakravchenko21/pyinfra | 6e14b039422e00ebc68110eabbc6a3a543c96279 | [
"MIT"
] | 1 | 2021-11-12T18:36:01.000Z | 2021-11-12T18:36:01.000Z | from pyinfra import host
from pyinfra.operations import git, pip, server
# Ensure the state of git repositories
git.repo(
{'Clone pyinfra repository'},
'git@github.com:Fizzadar/pyinfra',
host.data.app_dir,
branch='develop',
ssh_keyscan=True,
sudo=True,
# Carry SSH agent details w/sudo
preserve_sudo_env=True,
)
# Manage pip packages
did_install = pip.packages(
{'Install virtualenv with pip'},
['virtualenv'],
sudo=True,
)
# Use operation meta to affect the deploy
if did_install.changed:
server.shell(
'echo "Clean package build/etc"',
)
# Create a virtualenv
server.shell(
{'Setup the virtualenv'},
'virtualenv {{ host.data.env_dir }}',
sudo=True,
sudo_user='pyinfra',
)
# and manage pip within it
pip.packages(
{'Install Python packages with pip'},
['ElasticQuery', 'JsonTest'],
virtualenv=host.data.env_dir,
sudo=True,
sudo_user='pyinfra',
)
| 21.522727 | 47 | 0.671595 |
ace62d83bcff6c61e573c068bbf301de846c273f | 301 | py | Python | Dataset/Leetcode/train/27/33.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/27/33.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/27/33.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution:
def XXX(self, nums: List[int], val: int) -> int:
left = 0
right = 0
while right<len(nums):
if nums[right] != val:
nums[left],nums[right] = nums[right],nums[left]
left+=1
right+=1
return left
| 25.083333 | 63 | 0.465116 |
ace6309bb0986cc02a728be7a60a9a33bfa90735 | 545 | py | Python | core/src/trezor/messages/NEMMosaic.py | Kayuii/trezor-crypto | 6556616681a4e2d7e18817e8692d4f6e041dee01 | [
"MIT"
] | null | null | null | core/src/trezor/messages/NEMMosaic.py | Kayuii/trezor-crypto | 6556616681a4e2d7e18817e8692d4f6e041dee01 | [
"MIT"
] | 1 | 2019-02-08T00:22:42.000Z | 2019-02-13T09:41:54.000Z | core/src/trezor/messages/NEMMosaic.py | Kayuii/trezor-crypto | 6556616681a4e2d7e18817e8692d4f6e041dee01 | [
"MIT"
] | 2 | 2019-02-07T23:57:09.000Z | 2020-10-21T07:07:27.000Z | # Automatically generated by pb2py
# fmt: off
import protobuf as p
class NEMMosaic(p.MessageType):
def __init__(
self,
namespace: str = None,
mosaic: str = None,
quantity: int = None,
) -> None:
self.namespace = namespace
self.mosaic = mosaic
self.quantity = quantity
@classmethod
def get_fields(cls):
return {
1: ('namespace', p.UnicodeType, 0),
2: ('mosaic', p.UnicodeType, 0),
3: ('quantity', p.UVarintType, 0),
}
| 21.8 | 47 | 0.544954 |
ace632cf4cd87c98b613950e59c29084eb985702 | 410 | py | Python | openacademy/model/partner.py | jorgescalona/openacademy-project | dde32900bcad10dd9530fdc56e9c8e9d95ace04c | [
"Apache-2.0"
] | null | null | null | openacademy/model/partner.py | jorgescalona/openacademy-project | dde32900bcad10dd9530fdc56e9c8e9d95ace04c | [
"Apache-2.0"
] | null | null | null | openacademy/model/partner.py | jorgescalona/openacademy-project | dde32900bcad10dd9530fdc56e9c8e9d95ace04c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from openerp import fields, models
class Partner(models.Model):
_inherit = 'res.partner'
# Add a new column to the rest partner model ....
instructor = fields.Boolean('Instructor', default=False)
session_ids = fields.Many2many('openacademy.session',
string="Sessions as attendee",
readonly=True)
| 29.285714 | 65 | 0.582927 |
ace632dee27219b16d65e5445a113f36b95448f4 | 11,247 | py | Python | vdist/console_parser.py | dante-signal31/vdist | 00a123d3a5361960498c915163f0812c46214ce6 | [
"MIT"
] | 21 | 2017-02-25T01:59:53.000Z | 2021-06-26T16:51:42.000Z | vdist/console_parser.py | kayodebristol/vdist | b135f81c01e5bf32b37295a9620274b346271e71 | [
"MIT"
] | 5 | 2017-06-15T13:58:39.000Z | 2021-09-18T17:06:32.000Z | vdist/console_parser.py | kayodebristol/vdist | b135f81c01e5bf32b37295a9620274b346271e71 | [
"MIT"
] | 6 | 2017-11-22T23:40:30.000Z | 2022-01-19T21:08:55.000Z | import argparse
import os.path
from typing import Dict
def _check_is_file(_string: str) -> str:
if os.path.isfile(_string):
return _string
else:
raise argparse.ArgumentTypeError("{0} file does "
"not exists.".format(_string))
# TODO: Some defaults are redundant with defaults at object creation. Fix it.
def parse_arguments(args: list=None) -> Dict[str, str]:
    """Build the vdist command-line parser and parse *args*.

    Two modes are exposed as required subcommands: ``batch`` (read a
    configuration file) and ``manual`` (configure everything via flags).

    Args:
        args: Argument list to parse; when None argparse falls back to
            sys.argv[1:].

    Returns:
        Dict of parsed arguments with all None-valued entries filtered out.
    """
    arg_parser = argparse.ArgumentParser(description="A tool that lets you "
                                                     "create OS packages from "
                                                     "your Python applications "
                                                     "in a clean and self "
                                                     "contained manner.\n",
                                         epilog="Follow vdist development at: "
                                                "<https://github.com/dante-signal31/vdist>")
    # There is a bug in Python 3.5 argparser that makes that missing arguments
    # don't raise a "too few arguments". While that bug is finally fixed, there
    # is a workaround in:
    # http://stackoverflow.com/questions/23349349/argparse-with-required-subparser
    subparsers = arg_parser.add_subparsers(help="Available modes",
                                           dest="mode")
    subparsers.required = True
    automatic_subparser = subparsers.add_parser("batch",
                                                help="Automatic configuration. "
                                                     "Parameters are going to "
                                                     " be read from a"
                                                     "configuration file.")
    automatic_subparser.add_argument("configuration_file",
                                     nargs="?",
                                     default=None,
                                     type=_check_is_file,
                                     metavar="CONFIGURATION FILENAME")
    # [TODO] Console does not get output_script right when entered from console
    # in batch mode.
    automatic_subparser.add_argument("--output_script",
                                     required=False,
                                     help="Copy build script in output folder.",
                                     action="store_const",
                                     const=True,
                                     default=False)
    manual_subparser = subparsers.add_parser("manual",
                                             help="Manual configuration. "
                                                  "Parameters are going to be "
                                                  "provided through flags.")
    manual_subparser.add_argument("-a", "--app",
                                  required=True,
                                  help="Name of application to package.",
                                  metavar="APP_NAME")
    manual_subparser.add_argument("-v", "--version",
                                  required=True,
                                  help="Version of application to package.",
                                  metavar="APP_VERSION")
    # Exactly one source location must be given: remote git, local git
    # checkout, or a plain local directory.
    source = manual_subparser.add_mutually_exclusive_group(required=True)
    source.add_argument("-g", "--source_git",
                        help="Location of remote git repository to build from.",
                        metavar="APP_GIT")
    source.add_argument("-G", "--source_git_directory",
                        help="Location of local git repository to build from.",
                        metavar="APP_GIT_DIRECTORY")
    source.add_argument("-d", "--directory",
                        help="Location of local directory to build from.",
                        metavar="APP_DIRECTORY")
    manual_subparser.add_argument("-p", "--profile",
                                  required=True,
                                  help="Build profile",
                                  metavar="BUILD_PROFILE")
    manual_subparser.add_argument("-n", "--name",
                                  required=False,
                                  help="Build name",
                                  metavar="BUILD_NAME")
    manual_subparser.add_argument("-b", "--build_deps",
                                  required=False,
                                  nargs="*",
                                  help="Build dependencies.",
                                  metavar="BUILD_DEPENDENCIES")
    manual_subparser.add_argument("-r", "--runtime_deps",
                                  required=False,
                                  nargs="*",
                                  help="Runtime dependencies.",
                                  metavar="RUNTIME_DEPENDENCIES")
    manual_subparser.add_argument("-c", "--custom_filename",
                                  required=False,
                                  help="Custom filename for generated package.",
                                  metavar="CUSTOM_FILENAME")
    manual_subparser.add_argument("-f", "--fpm_args",
                                  required=False,
                                  help="Extra arguments for FPM. (Put text "
                                       "between quotes)",
                                  metavar="FPM_ARGS")
    manual_subparser.add_argument("-i", "--pip_args",
                                  required=False,
                                  help="Extra arguments for PIP. (Put text "
                                       "between quotes)",
                                  metavar="PIP_ARGS")
    manual_subparser.add_argument("-B", "--python_basedir",
                                  required=False,
                                  help="Base directory were python "
                                       "distribution "
                                       "that is going to be packaged is placed "
                                       "inside container. (Defaults to '/opt')",
                                  metavar="PYTHON_BASEDIR")
    manual_subparser.add_argument("-R", "--package_install_root",
                                  required=False,
                                  help="Base directory were this package "
                                       "is going to be installed in target "
                                       "system. (Defaults to 'python_basedir')",
                                  metavar="INSTALL_ROOT")
    manual_subparser.add_argument("-P", "--package_tmp_root",
                                  required=False,
                                  help="Temporal folder used in docker "
                                       "container to build your package. "
                                       "(Defaults to '/tmp')",
                                  metavar="TMP_ROOT")
    manual_subparser.add_argument("-D", "--working_dir",
                                  required=False,
                                  help="Subdirectory under your source tree "
                                       "that is to be regarded as the base "
                                       "directory",
                                  metavar="WORKING_DIR")
    # NOTE(review): this flag stores the *strings* "True"/"False", not
    # booleans -- downstream code presumably expects strings; verify before
    # changing.
    manual_subparser.add_argument("-C", "--compile_python",
                                  required=False,
                                  help="Indicates Python should be fetched "
                                       "from python.org, compiled and shipped "
                                       "for you.",
                                  action="store_const",
                                  const="True",
                                  default="False")
    manual_subparser.add_argument("-V", "--python_version",
                                  required=False,
                                  help="Python version to package.",
                                  metavar="PYTHON_VERSION")
    manual_subparser.add_argument("-t", "--requirements_path",
                                  required=False,
                                  help="Path to your pip requirements file, "
                                       "relative to your project root. "
                                       "(Defaults to */requirements.txt*).",
                                  metavar="REQUIREMENTS_PATH")
    manual_subparser.add_argument("-o", "--output_folder",
                                  required=False,
                                  help="Folder where generated packages should "
                                       "be placed.",
                                  metavar="OUTPUT_FOLDER")
    manual_subparser.add_argument("--output_script",
                                  required=False,
                                  help="Copy build script in output folder.",
                                  action="store_const",
                                  const=True,
                                  default=False)
    # WARNING: Keep package scripts arguments names similar to fpm arguments for
    # scripts. Arguments names from here are directly used as fpm arguments
    # names.
    manual_subparser.add_argument("--after_install",
                                  required=False,
                                  help="A script to be run after package "
                                       "installation.",
                                  metavar="AFTER_INSTALL_SCRIPT")
    manual_subparser.add_argument("--before_install",
                                  required=False,
                                  help="A script to be run before package "
                                       "installation.",
                                  metavar="BEFORE_INSTALL_SCRIPT")
    manual_subparser.add_argument("--after_remove",
                                  required=False,
                                  help="A script to be run after package "
                                       "removal.",
                                  metavar="AFTER_REMOVE_SCRIPT")
    manual_subparser.add_argument("--before_remove",
                                  required=False,
                                  help="A script to be run before package "
                                       "removal.",
                                  metavar="BEFORE_REMOVE_SCRIPT")
    manual_subparser.add_argument("--after_upgrade",
                                  required=False,
                                  help="A script to be run after package "
                                       "upgrade.",
                                  metavar="AFTER_UPGRADE_SCRIPT")
    manual_subparser.add_argument("--before_upgrade",
                                  required=False,
                                  help="A script to be run before package "
                                       "upgrade",
                                  metavar="BEFORE_UPGRADE_SCRIPT")
    parsed_arguments = vars(arg_parser.parse_args(args))
    # Drop arguments the user did not provide so callers can apply their
    # own defaults.
    filtered_parser_arguments = {key: value for key, value in parsed_arguments.items()
                                 if value is not None}
    return filtered_parser_arguments
ace63306e7350317a8fbcf5ec984def6d3ce47af | 640 | py | Python | dbio/auth/cli.py | DataBiosphere/data-store-cli | d83776ac68d3c6c86de1714487803bbdf705fddf | [
"MIT"
] | null | null | null | dbio/auth/cli.py | DataBiosphere/data-store-cli | d83776ac68d3c6c86de1714487803bbdf705fddf | [
"MIT"
] | 41 | 2020-01-13T22:19:19.000Z | 2020-03-16T23:11:37.000Z | dbio/auth/cli.py | DataBiosphere/data-store-cli | d83776ac68d3c6c86de1714487803bbdf705fddf | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from . import AuthClient
def add_commands(subparsers, help_menu=False):
    """Register the `auth` subcommand tree on *subparsers*.

    Prints the auth parser's own help when invoked without a subcommand,
    and delegates the concrete subcommands to the AuthClient.
    """
    auth_parser = subparsers.add_parser('auth', help="Interact with the authorization and authentication system.")
    def print_auth_help(args):
        auth_parser.print_help()
    # See https://bugs.python.org/issue9351 -- set_defaults interacts badly
    # with subparsers on older interpreters.
    if sys.version_info >= (2, 7, 9):
        auth_parser.set_defaults(entry_point=print_auth_help)
    auth_subparsers = auth_parser.add_subparsers()
    AuthClient().build_argparse_subparsers(auth_subparsers, help_menu=help_menu)
| 33.684211 | 114 | 0.764063 |
ace6337538ae0ab14ce2df735dc1e978fab754fe | 2,168 | py | Python | tools/harness/tests/clockdrift.py | HawxChen/barrelfishOS | d8fbca601bfac61cc92ac06f5f5dd79ab18a8eaa | [
"MIT"
] | 81 | 2015-01-02T23:53:38.000Z | 2021-12-26T23:04:47.000Z | tools/harness/tests/clockdrift.py | salivarick/barrelfish | 252246b89117b0a23f6caa48a8b166c7bf12f885 | [
"MIT"
] | 1 | 2016-09-21T06:27:06.000Z | 2016-10-05T07:16:28.000Z | tools/harness/tests/clockdrift.py | salivarick/barrelfish | 252246b89117b0a23f6caa48a8b166c7bf12f885 | [
"MIT"
] | 7 | 2015-03-11T14:27:15.000Z | 2017-11-08T23:03:45.000Z | ##########################################################################
# Copyright (c) 2009, 2010, ETH Zurich.
# All rights reserved.
#
# This file is distributed under the terms in the attached LICENSE file.
# If you do not find this file, copies can be found by writing to:
# ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
##########################################################################
import re
import debug, tests, datetime
from common import TestCommon, TimeoutError
from results import RowResults
# Timeout for a complete run, including setup etc.
# (applied via self.set_timeout() in ClockDriftTest.boot below)
CLOCKDRIFT_TIMEOUT = datetime.timedelta(hours=13)
@tests.add_test
class ClockDriftTest(TestCommon):
    ''' APIC clock drift test '''
    name = "clockdrift_apic"
    def get_modules(self, build, machine):
        # Run the apicdrift benchmark with the machine's core count as argument.
        modules = super(ClockDriftTest, self).get_modules(build, machine)
        modules.add_module("apicdrift_bench", [machine.get_ncores()])
        return modules
    def get_finish_string(self):
        # Output line that marks the end of a benchmark run.
        return "client done."
    def boot(self, *args):
        super(ClockDriftTest, self).boot(*args)
        self.set_timeout(CLOCKDRIFT_TIMEOUT)
    def process_data(self, testdir, rawiter):
        """Parse benchmark output into per-core-pair clock drift deltas.

        Expects a "Running on N cores." line followed by rows of N timestamp
        readings; emits the difference between consecutive cores per row.
        NOTE(review): `results`/`ncores` stay unbound if the header line never
        appears -- the final return would then raise; assumes well-formed output.
        """
        corestr = None
        lastdata = None
        for line in rawiter:
            m = re.match("Running on (\d+) cores.", line)
            if m:
                ncores = int(m.group(1))
                results = RowResults(["core %d to %d" % (n, (n + 1) % ncores) for n in range(ncores)])
                # Build a regex matching one timestamp group per core.
                corestr = "\d+: "
                for n in range(ncores):
                    corestr += "(\d+) "
                continue
            if corestr != None:
                m = re.match(corestr, line)
                if m:
                    data = [int(m.group(n)) for n in range(1, ncores + 1)]
                    # First delta bridges the last core of the previous row
                    # to the first core of this row (0 for the very first row).
                    if lastdata != None:
                        diffs = [data[0] - lastdata]
                    else:
                        diffs = [0]
                    diffs += [(data[n] - data[n - 1]) for n in range(1, ncores)]
                    results.add_row(diffs)
                    lastdata = data[ncores - 1]
        return results
| 34.967742 | 102 | 0.523063 |
ace634b3e0f52b7d00caf89c8ccdab494c503456 | 1,743 | py | Python | conet/datasets/dataset.py | steermomo/conet | 21d60fcb4ab9a01a00aa4d9cd0bdee79ea35cc4b | [
"MIT"
] | null | null | null | conet/datasets/dataset.py | steermomo/conet | 21d60fcb4ab9a01a00aa4d9cd0bdee79ea35cc4b | [
"MIT"
] | null | null | null | conet/datasets/dataset.py | steermomo/conet | 21d60fcb4ab9a01a00aa4d9cd0bdee79ea35cc4b | [
"MIT"
] | 1 | 2020-05-18T10:05:24.000Z | 2020-05-18T10:05:24.000Z | import math
import os
import random
from os import path
import albumentations as alb
from albumentations.pytorch import ToTensorV2
from skimage.color import gray2rgb
import cv2
import numpy as np
from torch.utils.data import Dataset
import pickle
from conet.config import get_cfg
# Training-time augmentation: random geometric/photometric jitter, reflect
# padding and a 512x512 random crop, then ImageNet normalization + tensor.
train_aug = alb.Compose([
    # alb.RandomSizedCrop(min_max_height=(300, 500)),
    alb.RandomScale(),
    alb.HorizontalFlip(),
    alb.VerticalFlip(),
    alb.RandomBrightness(limit=0.01),
    alb.Rotate(limit=30),
    alb.PadIfNeeded(520, border_mode=cv2.BORDER_REFLECT101),
    alb.RandomCrop(512, 512),
    alb.Normalize(),
    # alb.pytorch.ToTensor(),
    ToTensorV2()
])
# Validation-time pipeline: deterministic -- normalize, resize to 512x512,
# convert to tensor.
val_aug = alb.Compose([
    # alb.PadIfNeeded(512, border_mode=cv2.BORDER_REFLECT101),
    alb.Normalize(),
    alb.Resize(512, 512),
    ToTensorV2(),
])
class DukeOctDataset(Dataset):
    """OCT segmentation dataset backed by .npy files listed in split.dp.

    Each sample file is loaded as an array whose first slice is the image
    and second slice is the mask; the split ('train'/'val') selects both
    the file list and the augmentation pipeline.
    """
    def __init__(self, split='train'):
        cfg = get_cfg()
        self.cfg = cfg
        self.data_dir = cfg.data_dir
        # split.dp is a pickled dict: split name -> list of relative file paths.
        with open(path.join(cfg.data_dir, 'split.dp'), 'rb') as infile:
            self.d_split = pickle.load(infile)
        self.split = split
        self.d_basefp = self.d_split[split]
        if split == 'train':
            self.aug = train_aug
        elif split == 'val':
            self.aug = val_aug
        else:
            raise NotImplementedError
    def __len__(self):
        return len(self.d_basefp)
    def __getitem__(self, idx):
        # carr[0] = grayscale image, carr[1] = segmentation label.
        carr = np.load(path.join(self.data_dir, self.d_basefp[idx]))
        img, label = carr[0], carr[1]
        img = gray2rgb(img)
        # Returns the albumentations dict ('image', 'mask') plus the filename.
        auged = self.aug(image=img, mask=label)
        auged['fname'] = self.d_basefp[idx]
        # img = auged['image']
        # print(img.shape)
        return auged
| 23.554054 | 71 | 0.633964 |
ace636157d3313c30aaf3a97f77773d03e708ac5 | 1,800 | py | Python | components/aws/sagemaker/tests/integration_tests/component_tests/test_model_component.py | kamalmemon/pipelines | 7e68991a2a7bfa767f893facfe58190690ca89ed | [
"Apache-2.0"
] | 1 | 2020-10-13T13:28:42.000Z | 2020-10-13T13:28:42.000Z | components/aws/sagemaker/tests/integration_tests/component_tests/test_model_component.py | kamalmemon/pipelines | 7e68991a2a7bfa767f893facfe58190690ca89ed | [
"Apache-2.0"
] | 4 | 2022-02-14T21:39:59.000Z | 2022-03-08T23:38:00.000Z | components/aws/sagemaker/tests/integration_tests/component_tests/test_model_component.py | kamalmemon/pipelines | 7e68991a2a7bfa767f893facfe58190690ca89ed | [
"Apache-2.0"
] | 2 | 2019-10-15T03:06:15.000Z | 2019-10-15T03:10:39.000Z | import pytest
import os
import utils
from utils import kfp_client_utils
from utils import minio_utils
from utils import sagemaker_utils
@pytest.mark.parametrize(
    "test_file_dir",
    [
        pytest.param(
            "resources/config/kmeans-mnist-model", marks=pytest.mark.canary_test
        )
    ],
)
def test_createmodel(kfp_client, experiment_id, sagemaker_client, test_file_dir):
    """Integration test: run the create-model pipeline and verify the
    SageMaker model exists and its name matches the pipeline output."""
    download_dir = utils.mkdir(os.path.join(test_file_dir + "/generated"))
    test_params = utils.load_params(
        utils.replace_placeholders(
            os.path.join(test_file_dir, "config.yaml"),
            os.path.join(download_dir, "config.yaml"),
        )
    )
    # Generate random prefix for model name to avoid errors if model with same name exists
    test_params["Arguments"]["model_name"] = input_model_name = (
        utils.generate_random_string(5) + "-" + test_params["Arguments"]["model_name"]
    )
    print(f"running test with model_name: {input_model_name}")
    _, _, workflow_json = kfp_client_utils.compile_run_monitor_pipeline(
        kfp_client,
        experiment_id,
        test_params["PipelineDefinition"],
        test_params["Arguments"],
        download_dir,
        test_params["TestName"],
        test_params["Timeout"],
    )
    # Pull the pipeline component's output artifact from MinIO.
    outputs = {"sagemaker-create-model": ["model_name"]}
    output_files = minio_utils.artifact_download_iterator(
        workflow_json, outputs, download_dir
    )
    output_model_name = utils.read_from_file_in_tar(
        output_files["sagemaker-create-model"]["model_name"]
    )
    print(f"model_name: {output_model_name}")
    assert output_model_name == input_model_name
    # The model must actually exist in SageMaker, not just in the artifact.
    assert (
        sagemaker_utils.describe_model(sagemaker_client, input_model_name) is not None
    )
    utils.remove_dir(download_dir)
| 30 | 90 | 0.689444 |
ace6369792e9c5551d6e98c0281772c0b23363d5 | 733 | py | Python | ledger/core/tests/test_schema_register.py | berrondo/ledger | 814c151845901f125521b6d0a19479b5e8798440 | [
"MIT"
] | null | null | null | ledger/core/tests/test_schema_register.py | berrondo/ledger | 814c151845901f125521b6d0a19479b5e8798440 | [
"MIT"
] | null | null | null | ledger/core/tests/test_schema_register.py | berrondo/ledger | 814c151845901f125521b6d0a19479b5e8798440 | [
"MIT"
] | 1 | 2021-02-11T00:08:48.000Z | 2021-02-11T00:08:48.000Z | from ledger.core.models import (
Transaction as T,
Schema,
)
from ..calculations import Percentual
from ..register import SchemaRegister as R
def test():
    """Naming a Transaction auto-registers its Schema, which can then be
    reused through SchemaRegister attribute access and composed."""
    # creating a Transaction by naming it automatically
    # registers its Schema to be reused:
    T('VendaComplicada',
      amount=100)
    T('ImpostoInjusto',
      amount=Percentual(10))
    # One Schema row was created per named transaction above.
    assert Schema.objects.all().count() == 2
    assert R.VendaComplicada
    assert type(R.VendaComplicada) == T
    assert R.VendaComplicada.amount == 100
    assert R.ImpostoInjusto
    assert type(R.ImpostoInjusto) == T
    assert R.ImpostoInjusto.amount == Percentual(10)
    #using:
    # A registered schema is callable and can be invoked with another
    # registered schema (composition).
    VendaComplicada = \
        R.VendaComplicada(
            R.ImpostoInjusto
        )
| 22.212121 | 55 | 0.673943 |
ace637ad886b6f26d1912692d7ee183f8e5ed757 | 971 | py | Python | Blockchain/boot.py | mayfieldmobster/DECI | c2c9165aeec7344048dd9479049dc490033881d5 | [
"MIT"
] | 1 | 2021-12-19T01:09:12.000Z | 2021-12-19T01:09:12.000Z | Blockchain/boot.py | mayfieldmobster/DECI | c2c9165aeec7344048dd9479049dc490033881d5 | [
"MIT"
] | null | null | null | Blockchain/boot.py | mayfieldmobster/DECI | c2c9165aeec7344048dd9479049dc490033881d5 | [
"MIT"
] | null | null | null | import os
import node
import reciever
import reader
import trans_reader
import validator
import steak_trans
import pre_reader
import concurrent.futures
import socket
"""
update tensorflow
update Blockchain and nodes
"""
open("recent_messages.txt", "w").close()#clear recent message file
local_ip = socket.gethostbyname(socket.gethostname())
os.system("pip3 install --upgrade ecdsa")
"""
try:
os.remove("install.py")
os.remove("install.exe")
except:
pass#wont work after first time ill come up with better way later
"""
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.submit(reciever.rec, local_ip)#start recieving
executor.submit(node.get_blockchain)#update Blockchain
executor.submit(node.get_nodes)#update nodes
executor.submit(reader.read)
executor.submit(trans_reader.read)
executor.submit(steak_trans.updator)
executor.submit(validator.am_i_validator)
executor.submit(pre_reader.read)
| 18.673077 | 69 | 0.762101 |
ace637ef7e88fa303a7060842cd6d0b023210c44 | 1,270 | py | Python | pytest_suit/routes/snapshot/test_listSnapshot.py | enermaps/Hotmaps-toolbox-service | a9a5616e3c6fad081134aadf5ce96b3dcc416bf9 | [
"Apache-2.0"
] | null | null | null | pytest_suit/routes/snapshot/test_listSnapshot.py | enermaps/Hotmaps-toolbox-service | a9a5616e3c6fad081134aadf5ce96b3dcc416bf9 | [
"Apache-2.0"
] | 1 | 2020-10-09T14:09:57.000Z | 2020-10-27T09:27:53.000Z | pytest_suit/routes/snapshot/test_listSnapshot.py | enermaps/Hotmaps-toolbox-service | a9a5616e3c6fad081134aadf5ce96b3dcc416bf9 | [
"Apache-2.0"
] | null | null | null | import unittest
from unittest import TestCase
import requests
from . import BASE_URL, test_config, test_token
url = BASE_URL + "/snapshot/list"
class TestListSnapshot(TestCase):
    """Integration tests for the POST /snapshot/list endpoint."""
    @unittest.skip("This test keeps failing even though we did not change anything")
    def test_post_working(self):
        """Valid token: the endpoint returns the stored snapshot config."""
        body = {"token": test_token}
        response = requests.post(url, json=body)
        assert response.json()["snapshots"][0]["config"] == test_config
    def test_post_missing_parameter(self):
        """Missing 'token' parameter: the API answers with error status 531."""
        body = {
            "tokfadsfasden": test_token,
        }
        response = requests.post(url, json=body)
        assert response.json()["error"]["status"] == "531"
    def test_post_user_unidentified(self):
        """Unknown token: the API answers with error status 539."""
        body = {
            "token": "toto",
        }
        response = requests.post(url, json=body)
        assert response.json()["error"]["status"] == "539"
| 24.901961 | 84 | 0.613386 |
ace63803d95179bf31fb5161c70eaf28f6025bc0 | 5,103 | py | Python | pug-bot/commands/checks.py | stevenktruong/pug-bot | 315c21363eebb51d67d5b5c9fa9326cd8bcb2b54 | [
"MIT"
] | 17 | 2018-06-27T03:49:03.000Z | 2021-04-13T07:32:43.000Z | pug-bot/commands/checks.py | stevenktruong/pug-bot | 315c21363eebb51d67d5b5c9fa9326cd8bcb2b54 | [
"MIT"
] | 3 | 2020-03-26T06:49:10.000Z | 2020-04-23T07:20:41.000Z | pug-bot/commands/checks.py | stevenktruong/pug-bot | 315c21363eebb51d67d5b5c9fa9326cd8bcb2b54 | [
"MIT"
] | 14 | 2018-06-27T03:49:06.000Z | 2021-10-07T23:28:44.000Z | """
This module creates a decorator called `check` which takes a list of functions to run
If any of these checks fail, the decorated function will not run and an error message
is sent to the Discord channel instead
"""
from ..config import *
from ..utils import find_in_list
# The first parameters of commands should be
# message, pugs
# in that order.
def check(*args):
    """Decorator factory that runs pre-command checks.

    Every function in ``args`` is called with (message, pugs, user_input)
    and returns either None (check passed) or an error string.  On the
    first error, the error text is sent to the channel and the decorated
    command is never invoked.
    """
    def wrapper(function):
        async def wrapped(message, pugs, user_input, client=None):
            for predicate in args:
                error = predicate(message, pugs, user_input)
                if error is not None:
                    # Short-circuit: report the failed check instead of running.
                    return await message.channel.send(error)
            await function(message, pugs, user_input, client)
        return wrapped
    return wrapper
def input_too_long(message, pugs, user_input):
    """Fail when the raw message exceeds 100 characters."""
    if len(message.content) > 100:
        return INPUT_TOO_LONG
def already_have_pug(message, pugs, user_input):
    """Fail when the author already owns a PUG."""
    owned_pug = find_in_list(lambda pug: pug.owner == message.author, pugs)
    if owned_pug:
        return ALREADY_HAVE_PUG
def have_no_pug(message, pugs, user_input):
    """Fail when the author does not own a PUG."""
    owned_pug = find_in_list(lambda pug: pug.owner == message.author, pugs)
    if not owned_pug:
        return HAVE_NO_PUG
# You need to check `have_no_pug` before checking this
def pug_already_started(message, pugs, user_input):
    """Fail when the author's PUG has already been started."""
    owned_pug = find_in_list(lambda pug: pug.owner == message.author, pugs)
    if owned_pug.active == PUG_STARTED:
        return PUG_ALREADY_STARTED
# You need to check `have_no_pug` before checking this
def pug_already_stopped(message, pugs, user_input):
    """Fail when the author's PUG has already been stopped."""
    owned_pug = find_in_list(lambda pug: pug.owner == message.author, pugs)
    if owned_pug.active == PUG_STOPPED:
        return PUG_ALREADY_STOPPED
def already_in_pug(message, pugs, user_input):
    """Fail when the author is already a player in some PUG."""
    existing_pug = find_in_list(lambda pug: message.author in pug.players, pugs)
    if existing_pug:
        return ALREADY_IN_PUG
def not_in_pug(message, pugs, user_input):
    """Fail when the author is not a player in any PUG."""
    existing_pug = find_in_list(lambda pug: message.author in pug.players, pugs)
    if not existing_pug:
        return NOT_IN_PUG
def pug_doesnt_exist(message, pugs, user_input):
    """Fail when no PUG matches the name given in the arguments."""
    pug_name = user_input["arguments"]
    existing_pug = find_in_list(lambda pug: pug.name == pug_name, pugs)
    if not existing_pug:
        return PUG_DOESNT_EXIST
def pug_has_no_teams(message, pugs, user_input):
    """Fail when the author's PUG has no teams yet."""
    owned_pug = find_in_list(lambda pug: pug.owner == message.author, pugs)
    if not owned_pug.teams:
        return PUG_HAS_NO_TEAMS
def channels_not_picked(message, pugs, user_input):
    """Fail when at least one team has no channel assigned yet."""
    owned_pug = find_in_list(lambda pug: pug.owner == message.author, pugs)
    if not all(team.channel for team in owned_pug.teams):
        return CHANNELS_NOT_PICKED
def invalid_number(message, pugs, user_input):
    """Check that the last whitespace-separated argument parses as an int.

    Returns INVALID_NUMBER when it does not (or when there are no
    arguments at all); returns None when the check passes.
    """
    arguments = user_input["arguments"].split()
    # Attempt to cast the input as an int.  The except clause is narrowed
    # from a bare `except:` so KeyboardInterrupt/SystemExit still propagate;
    # IndexError covers the empty-arguments case.
    try:
        int(arguments[-1])
    except (ValueError, IndexError):
        return INVALID_NUMBER
def invalid_number_multiple(message, pugs, user_input):
    """Check that every whitespace-separated argument parses as an int.

    Returns INVALID_NUMBER on the first non-integer argument; returns
    None when all arguments are integers (or there are none).
    """
    arguments = user_input["arguments"].split()
    for argument in arguments:
        try:
            int(argument)
        except ValueError:
            # Narrowed from a bare `except:` — only a failed int() parse
            # should trip this check.
            return INVALID_NUMBER
def invalid_range(message, pugs, user_input):
    """Check that the first two arguments are ints forming a low-high range.

    Returns INVALID_NUMBER when either bound is missing or non-numeric,
    INVALID_RANGE when low > high, and None when the check passes.
    """
    arguments = user_input["arguments"].split()
    # Attempt to cast the input as ints once (the original re-parsed them
    # for the comparison).  Narrowed from a bare `except:`; IndexError
    # covers fewer than two arguments.
    try:
        low = int(arguments[0])
        high = int(arguments[1])
    except (ValueError, IndexError):
        return INVALID_NUMBER
    if low > high:
        return INVALID_RANGE
# You need to check `invalid_number` before checking this
def not_enough_players(message, pugs, user_input):
    """Fail when the requested team count exceeds the PUG's player count."""
    owned_pug = find_in_list(lambda pug: pug.owner == message.author, pugs)
    arguments = user_input["arguments"].split()
    # If there would be more teams than players
    if int(arguments[-1]) > len(owned_pug.players):
        return NOT_ENOUGH_PLAYERS
# You need to check `invalid_number` before checking this
def non_negative_number(message, pugs, user_input):
    """Fail when the numeric argument is zero or negative."""
    arguments = user_input["arguments"].split()
    # Reject zero or negative values.
    if int(arguments[-1]) <= 0:
        return NON_NEGATIVE_NUMBER
# You need to check `not_in_pug` before checking this
def team_already_exists(message, pugs, user_input):
    """Fail when the author's PUG already has a team with this name."""
    team_name = user_input["arguments"]
    existing_pug = find_in_list(lambda pug: message.author in pug.players, pugs)
    if team_name in map(lambda team: team.name, existing_pug.teams):
        return TEAM_ALREADY_EXISTS
# You need to check `not_in_pug` before checking this
def already_in_team(message, pugs, user_input):
    """Fail when the author already belongs to a team in their PUG."""
    existing_pug = find_in_list(lambda pug: message.author in pug.players, pugs)
    if existing_pug.find_team(message.author):
        return ALREADY_IN_TEAM
# You need to check `not_in_pug` before checking this
def not_a_captain(message, pugs, user_input):
    """Fail when the author is not a captain in their PUG."""
    existing_pug = find_in_list(lambda pug: message.author in pug.players, pugs)
    if not existing_pug.is_captain(message.author):
        return NOT_A_CAPTAIN | 34.952055 | 85 | 0.708995 |
ace6392aa263e667cf33191a8324c4eb64cd0203 | 213 | py | Python | output/models/ms_data/datatypes/facets/notation/notation_pattern001_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/datatypes/facets/notation/notation_pattern001_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/datatypes/facets/notation/notation_pattern001_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.ms_data.datatypes.facets.notation.notation_pattern001_xsd.notation_pattern001 import (
BuildNotation,
FooType,
Test,
)
# Public re-exports of this generated package.
__all__ = [
    "BuildNotation",
    "FooType",
    "Test",
]
| 17.75 | 105 | 0.699531 |
ace63f3fbed7342fee34a3329ae27d8ed4c90edb | 556 | py | Python | scripts/preprocess_abc_lstm.py | paveyry/PFE-Music-Generation | 9febecaa22540a134a1125d9de876c13d14470b7 | [
"MIT"
] | 12 | 2016-05-29T15:41:37.000Z | 2022-03-29T15:56:22.000Z | scripts/preprocess_abc_lstm.py | paveyry/PFE-Music-Generation | 9febecaa22540a134a1125d9de876c13d14470b7 | [
"MIT"
] | 8 | 2019-10-25T06:39:07.000Z | 2022-02-01T00:58:13.000Z | scripts/preprocess_abc_lstm.py | paveyry/PFE-Music-Generation | 9febecaa22540a134a1125d9de876c13d14470b7 | [
"MIT"
] | 7 | 2016-05-17T11:33:18.000Z | 2020-07-15T03:23:20.000Z | #! /usr/bin/env python3
import re
import sys
def main():
    """Pad every non-empty line of the ABC file given as argv[1] with '%'
    characters so all lines reach the length of the longest line, writing
    the result to stdout.

    Lines that contain '%' (ABC comments) are ignored when measuring the
    maximum length, but are still emitted (unpadded).
    """
    # `with` closes the file even on error (the original leaked the handle).
    with open(sys.argv[1], encoding='utf8') as f:
        content = f.read()
    lines = content.split('\n')
    # Longest line among those without a '%' comment marker; 0 for empty input.
    maxline = max((len(line) for line in lines if '%' not in line), default=0)
    # '%' * negative is '' so over-long comment lines pass through unchanged.
    padded = [line + '%' * (maxline - len(line)) + '\n' for line in lines if line]
    sys.stdout.write(''.join(padded))
# Allow importing this module without triggering the CLI.
if __name__ == '__main__':
    main()
| 25.272727 | 76 | 0.55036 |
ace63f425f57c00b9cc300b3b8b472f97fc79ea9 | 669 | py | Python | sridentify/cli.py | cmollet/sridentify | 77248bd1e474f014ac8951dacd196fd3417c452c | [
"MIT"
] | 12 | 2018-09-25T04:54:29.000Z | 2021-04-19T01:57:24.000Z | sridentify/cli.py | cmollet/epsg_ident | 77248bd1e474f014ac8951dacd196fd3417c452c | [
"MIT"
] | 6 | 2019-02-08T22:22:20.000Z | 2020-06-16T23:57:34.000Z | sridentify/cli.py | cmollet/epsg_ident | 77248bd1e474f014ac8951dacd196fd3417c452c | [
"MIT"
] | 4 | 2018-10-04T06:51:25.000Z | 2020-08-09T09:49:15.000Z | import argparse
import sys
from sridentify import Sridentify
# Command-line interface: one positional .prj file plus an opt-out flag
# for the remote lookup fallback.
parser = argparse.ArgumentParser(
    description="Identify an EPSG code from a .prj file",
)
parser.add_argument(
    'prj',
    help="The .prj file"
)
parser.add_argument(
    '-n',
    '--no-remote-api',
    # store_false: passing -n sets call_remote_api to False.
    action='store_false',
    dest='call_remote_api',
    help='Do not call the prj2epsg.org API if no match found in the database'
)
def main():
    """CLI entry point: print the EPSG code of the given .prj file, if any."""
    args = parser.parse_args()
    identifier = Sridentify(mode='cli', call_remote_api=args.call_remote_api)
    identifier.from_file(args.prj)
    epsg = identifier.get_epsg()
    if epsg is None:
        # No match found: print nothing.
        return
    sys.stdout.write(str(epsg) + '\n')
| 23.068966 | 79 | 0.686099 |
ace640319e1d341c750139525fe20f0f9c664810 | 1,139 | py | Python | ultimanager/teams/models.py | UltiManager/ultimanager-api | 4109ef21d187578ae978c9973e7ad11c88258b35 | [
"MIT"
] | null | null | null | ultimanager/teams/models.py | UltiManager/ultimanager-api | 4109ef21d187578ae978c9973e7ad11c88258b35 | [
"MIT"
] | 60 | 2017-12-24T02:22:09.000Z | 2022-02-10T08:20:13.000Z | ultimanager/teams/models.py | UltiManager/ultimanager-api | 4109ef21d187578ae978c9973e7ad11c88258b35 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils.translation import ugettext_lazy as _
from core.models import SlugModel
class Team(SlugModel):
    """
    A team represents an organization that participates in one or more
    seasons of play.
    """
    # Optional free-form notes about the team.
    description = models.TextField(
        blank=True,
        help_text=_("Any additional notes about the team."),
        verbose_name=_("description"),
    )
    # Human-readable team name.
    name = models.CharField(
        help_text=_("The name of the team."),
        max_length=100,
        verbose_name=_("name"),
    )
    class Meta(SlugModel.Meta):
        # Most recently created first (time_created presumably comes from
        # SlugModel — confirm against core.models).
        ordering = ("-time_created",)
        verbose_name = _("team")
        verbose_name_plural = _("teams")
    def __repr__(self):
        """
        Returns:
            A string that unambiguously describes the team. Intended to
            be used for debugging and logging.
        """
        return (
            f"<teams.Team: id={repr(self.pk)} name={repr(self.name)} "
            f"slug={repr(self.slug)}>"
        )
    def __str__(self):
        """
        Returns:
            The team's name.
        """
        return self.name
| 24.76087 | 71 | 0.574188 |
ace6413712b1b29aba30cec1bb090a1aa56a7a18 | 4,854 | py | Python | tools/accuracy_checker/openvino/tools/accuracy_checker/annotation_converters/imagenet.py | TolyaTalamanov/open_model_zoo | 1697e60712df4ca72635a2080a197b9d3bc24129 | [
"Apache-2.0"
] | 2,201 | 2018-10-15T14:37:19.000Z | 2020-07-16T02:05:51.000Z | tools/accuracy_checker/openvino/tools/accuracy_checker/annotation_converters/imagenet.py | Pandinosaurus/open_model_zoo | 2543996541346418919c5cddfb71e33e2cdef080 | [
"Apache-2.0"
] | 759 | 2018-10-18T07:43:55.000Z | 2020-07-16T01:23:12.000Z | tools/accuracy_checker/openvino/tools/accuracy_checker/annotation_converters/imagenet.py | Pandinosaurus/open_model_zoo | 2543996541346418919c5cddfb71e33e2cdef080 | [
"Apache-2.0"
] | 808 | 2018-10-16T14:03:49.000Z | 2020-07-15T11:41:45.000Z | """
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
import numpy as np
from ..config import PathField, BoolField
from ..representation import ClassificationAnnotation
from ..utils import read_txt, get_path, check_file_existence, read_json
from .format_converter import BaseFormatConverter, ConverterReturn, verify_label_map
class ImageNetFormatConverter(BaseFormatConverter):
__provider__ = 'imagenet'
annotation_types = (ClassificationAnnotation, )
@classmethod
def parameters(cls):
configuration_parameters = super().parameters()
configuration_parameters.update({
'annotation_file': PathField(description="Path to annotation in txt format."),
'labels_file': PathField(
optional=True,
description="Path to file with word description of labels (synset words)."
),
'has_background': BoolField(
optional=True, default=False,
description="Allows to add background label to original labels and"
" convert dataset for 1001 classes instead 1000."
),
'images_dir': PathField(
is_directory=True, optional=True,
description='path to dataset images, used only for content existence check'
),
'dataset_meta_file': PathField(
description='path to json file with dataset meta (e.g. label_map, color_encoding)', optional=True
)
})
return configuration_parameters
def configure(self):
self.annotation_file = self.get_value_from_config('annotation_file')
self.labels_file = self.get_value_from_config('labels_file')
self.has_background = self.get_value_from_config('has_background')
self.images_dir = self.get_value_from_config('images_dir') or self.annotation_file.parent
self.dataset_meta = self.get_value_from_config('dataset_meta_file')
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
annotation = []
content_errors = [] if check_content else None
original_annotation = read_txt(get_path(self.annotation_file))
num_iterations = len(original_annotation)
for image_id, image in enumerate(original_annotation):
image_name, label = image.split()
image_name = Path(image_name).name.split('@')[-1]
if check_content:
if not check_file_existence(self.images_dir / image_name):
content_errors.append('{}: does not exist'.format(self.images_dir / image_name))
label = np.int64(label) if not self.has_background else np.int64(label) + 1
annotation.append(ClassificationAnnotation(image_name, label))
if progress_callback is not None and image_id % progress_interval == 0:
progress_callback(image_id / num_iterations * 100)
return ConverterReturn(annotation, self.get_meta(), content_errors)
@staticmethod
def _create_meta(labels_file, dataset_meta, has_background=False):
meta = {}
label_map = {}
if dataset_meta:
meta = read_json(dataset_meta)
if 'labels' in meta and 'label_map' not in meta:
labels = ['background'] + meta['labels'] if has_background else meta['labels']
label_map = dict(enumerate(labels))
meta['label_map'] = label_map
else:
if 'label_map' in meta:
meta['label_map'] = verify_label_map(meta['label_map'])
return meta
if labels_file:
label_map = {}
for i, line in enumerate(read_txt(get_path(labels_file))):
index_for_label = i if not has_background else i + 1
line = line.strip()
label = line[line.find(' ') + 1:]
label_map[index_for_label] = label
meta['label_map'] = label_map
if has_background:
label_map[0] = 'background'
meta['background_label'] = 0
return meta
def get_meta(self):
meta = self._create_meta(self.labels_file, self.dataset_meta, self.has_background) or None
return meta
| 42.208696 | 113 | 0.652864 |
ace6415960dfefd488069854dec67e41f1f2038a | 993 | py | Python | cruncher-cl.py | Remit/autoscaling-simulator | 091943c0e9eedf9543e9305682a067ab60f56def | [
"MIT"
] | 6 | 2021-03-10T16:23:10.000Z | 2022-01-14T04:57:46.000Z | cruncher-cl.py | Remit/autoscaling-simulator | 091943c0e9eedf9543e9305682a067ab60f56def | [
"MIT"
] | null | null | null | cruncher-cl.py | Remit/autoscaling-simulator | 091943c0e9eedf9543e9305682a067ab60f56def | [
"MIT"
] | 1 | 2022-01-14T04:57:55.000Z | 2022-01-14T04:57:55.000Z | import sys
import argparse
from cruncher.cruncher import Cruncher
# Sample command line execution:
# python3.6 cruncher-cl.py --confdir "cruncher_conf/experiment_metrics_step_up/"
# comment: on my laptop it needs to be invoked with python without version at the end
parser = argparse.ArgumentParser(description = 'Running the experiments for alternative configurations.')
parser.add_argument('--confdir', dest = 'config_dir',
                    action = 'store', default = None,
                    help = 'directory with the configuration files for the experiments')
parser.add_argument('--datadir', dest = 'data_dir',
                    action = 'store', default = None,
                    help = 'directory with the data to visualize')
args = parser.parse_args()
# --confdir is effectively mandatory.
if args.config_dir is None:
    sys.exit('No configuration directory specified.')
c = Cruncher(args.config_dir)
# Without --datadir we run the experiments; with it we only visualize
# the previously collected data.
if args.data_dir is None:
    c.run_experiment()
else:
    c.set_data_dir(args.data_dir)
    c.visualize()
| 31.03125 | 105 | 0.697885 |
ace641f1e1cc86e46cd660fcc8a1e3e22ecf30a1 | 2,632 | py | Python | apn_search/utils/encoders.py | apnarm/django-apn-search | 014e8af7c1417978cf3def96cb57cbda5a9c807d | [
"MIT"
] | null | null | null | apn_search/utils/encoders.py | apnarm/django-apn-search | 014e8af7c1417978cf3def96cb57cbda5a9c807d | [
"MIT"
] | null | null | null | apn_search/utils/encoders.py | apnarm/django-apn-search | 014e8af7c1417978cf3def96cb57cbda5a9c807d | [
"MIT"
] | null | null | null | from django.db.models import Model
class Nothing:
    # Sentinel used by Encoder.encode() to mean "no argument supplied"
    # (None may itself be a legitimate value to encode).
    pass
class Encoder(object):
    """
    Encodes objects into another structure.
    This is very similar to a JSONEncoder. The big difference is that
    this will output a dictionary, while a JSONEncoder would output a
    JSON string that represents the dictionary.
    The original purpose of this class it to generate dictionaries that
    can then later on be converted into JSON with very little complexity.
    """
    def __init__(self, obj, **extra):
        # obj: the value to encode; extra: free-form options for subclasses.
        self.obj = obj
        self.extra = extra
    @property
    def method_cache(self):
        """Get the method cache for the current class."""
        try:
            return type(self)._method_cache
        except AttributeError:
            # Lazily create one cache dict per Encoder subclass.
            result = type(self)._method_cache = {}
            return result
    def methods(self):
        """
        Returns a dictionary mapping of methods and their supported instance
        type(s). Subclasses must implement this.
        """
        raise NotImplementedError
    def encode(self, other_obj=Nothing, **extra):
        """Encode self.obj (or, when given, other_obj via a fresh encoder)."""
        if other_obj is not Nothing:
            return self.__class__(other_obj, **extra).encode()
        obj = self.obj
        obj_type = type(obj)
        try:
            # Fast path: handler method name already cached for this type.
            method_name = self.method_cache[obj_type]
        except KeyError:
            if not hasattr(self, '_methods'):
                self._methods = self.methods()
            # First look for a subclass-registered handler for this type.
            for method, types in self._methods.items():
                if isinstance(obj, types):
                    method_name = method.__name__
                    break
            else:
                # Generic fallback: dict, non-string iterable, or default.
                # (Python 2: basestring covers str and unicode.)
                if isinstance(obj, dict):
                    method_name = 'dict'
                elif not isinstance(obj, basestring) and hasattr(obj, '__iter__'):
                    method_name = 'list'
                else:
                    method_name = 'default'
                # NOTE(review): only fallback names reach the cache; names
                # matched in the loop above are never cached — confirm
                # whether that is intentional.
                self.method_cache[obj_type] = method_name
        method = getattr(self, method_name)
        return method()
    def default(self):
        # Fallback: return the object unchanged.
        return self.obj
    def dict(self):
        return dict(self.dict_items())
    def dict_items(self):
        # Yield (key, encoded value) pairs (Python 2 iteritems()).
        for key, value in self.obj.iteritems():
            yield key, self.encode(value)
    def list(self):
        return [self.encode(item) for item in self.obj]
class BasicEncoder(Encoder):
    # Encoder with no type-specific handlers; relies on the generic
    # dict/list/default fallbacks only.
    def methods(self):
        return {}
    def default(self):
        # Prefer an object's own __json__ hook; stringify Django model
        # instances; otherwise pass the value through untouched.
        if hasattr(self.obj, '__json__'):
            return self.obj.__json__()
        elif isinstance(self.obj, Model):
            return unicode(self.obj)
        else:
            return self.obj
| 26.059406 | 82 | 0.581307 |
ace6420b044b69d7ca6dc643674e57306f6a0067 | 488 | py | Python | tests/objects/test_hook.py | mobidevke/py-fineract | 712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd | [
"Apache-2.0"
] | 7 | 2019-03-11T16:17:33.000Z | 2020-10-22T21:57:51.000Z | tests/objects/test_hook.py | mobidevke/py-fineract | 712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd | [
"Apache-2.0"
] | 3 | 2019-11-05T20:22:16.000Z | 2019-12-11T17:09:04.000Z | tests/objects/test_hook.py | mobidevke/py-fineract | 712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd | [
"Apache-2.0"
] | 2 | 2020-11-19T16:00:36.000Z | 2021-11-19T09:36:13.000Z | import json
import pytest
from fineract.objects.hook import Hook
@pytest.mark.parametrize('filename, klass', [
    ('hook', Hook),
])
def test_hook_object(filename, klass):
    """Hydrate the class from its JSON fixture and verify field types."""
    fixture_path = 'tests/files/{}.json'.format(filename)
    with open(fixture_path, 'r') as fh:
        payload = json.load(fh)
    obj = klass(None, payload, False)
    assert isinstance(obj.id, int)
    assert isinstance(obj.config, list)
    assert isinstance(obj.events, list)
    assert obj.events
    assert obj.config
| 24.4 | 70 | 0.645492 |
ace6434cf291a833e737aa7df4cbc1bc1fbe6d62 | 4,013 | py | Python | winsys-datacollector.py | zainkai/helpful-scripts | 4c7e104ec05b664020da1383edda361c17f96b3e | [
"MIT"
] | null | null | null | winsys-datacollector.py | zainkai/helpful-scripts | 4c7e104ec05b664020da1383edda361c17f96b3e | [
"MIT"
] | null | null | null | winsys-datacollector.py | zainkai/helpful-scripts | 4c7e104ec05b664020da1383edda361c17f96b3e | [
"MIT"
] | null | null | null | """
Author: Kevin Turkington (Zainkai)
Date: 2/17/2018
Class: CS 373 (Defense Against the Dark Arts)
dependencies:
- wmi
- pefile
- psutil
- pypiwin32
"""
import wmi, sys, psutil, os, pefile
def listRunningProcs():
"""
List all running processes
"""
print("listing processes...")
winApi = wmi.WMI()
for process in winApi.Win32_Process():
print(process.ProcessId, process.Name)
print("\tthread count:", process.ThreadCount)
#print(process)
def listProcThreads(pid): #pid must be a string
"""
Lists all threads for a specfic process
PID: must be a valid running process
"""
winApi = wmi.WMI()
print("finding threads, this will take a second...")
for thread in winApi.Win32_Thread(ProcessHandle=pid):
print(thread)
def listThreads():
"""
Lists all threads for all processes
"""
winApi = wmi.WMI()
print("finding threads, this will take a second...")
for thread in winApi.Win32_Thread():
print(thread, flush=True)
def listModules():
"""
Lists all running modules (DLLs)
"""
p = psutil.Process( os.getpid() )
for dll in p.memory_maps():
print(dll.path, flush=True)
def listTextAddr(moduleName):
"""
Lists all text sections for a module
"""
#example moduleName
# C:\\Windows\\SysWOW64\\ntdll.dll
try:
pe = pefile.PE(moduleName)
for section in pe.sections:
if b'.text' in section.Name:
print("Module: ", moduleName, "section: ",section.Name, "addr: ", hex(section.Misc_PhysicalAddress), flush=True)
#print(section)
except:
print("Module not found.")
print("module name format: C:/Windows/SysWOW64/ntdll.dll")
def listPEAddrs():
"""
lists all text sections for all modules
"""
p = psutil.Process( os.getpid() )
for dll in p.memory_maps():
try:
listTextAddr(dll.path)
except:
pass
def getDataAtDLLNAME(moduleName):
"""
Displays the hex data for a specfic module
"""
try:
pe = pefile.PE(moduleName)
for section in pe.sections:
print(section.Name)
print("----------------------------------")
print(section.get_data(), flush=True)
print("----------------------------------")
except:
print("Module not found.")
print("module name format: C:/Windows/SysWOW64/ntdll.dll")
if __name__ == "__main__":
if "-h" in sys.argv or "-help" in sys.argv:
print("-h -help : to display help text")
print("-lrp : to list running processes")
print("-lt <pid> : list all threads or threads of a specfic pid")
print("-lm : list all loaded modules")
print("-la <moduleName> : show address of executatble memory for all or one module")
print("-gd <moduleName> : show hex data in memory for a module")
elif "-lrp" in sys.argv: # problem 1 list running processes
listRunningProcs()
elif "-lt" in sys.argv: # problem 2 list threads
ARGS = sys.argv[2:]
if len(ARGS) == 0:
listThreads()
else:
listProcThreads(ARGS[0])
print("If nothing has printed Pid not longer exists")
elif "-lm" in sys.argv: # problem 3 list all loaded modules (DLLs)
listModules()
elif "-la" in sys.argv: # problem 4 show all the executable pages within the processes (.text sections of PE)
ARGS = sys.argv[2:]
if len(ARGS) == 0:
listPEAddrs()
else:
newModuleName = (ARGS[0]).replace("/","\\\\")
#print(newModuleName)
listTextAddr(newModuleName)
elif "-gd" in sys.argv and len(sys.argv[2:]) == 1: # problem 5 Gives us a capability to read the memory
ARGS = sys.argv[2:]
newModuleName = (ARGS[0]).replace("/","\\\\")
getDataAtDLLNAME(newModuleName)
else:
print("Invalid Command")
| 31.351563 | 128 | 0.581859 |
ace644110cb9a8cfb1b94b2aa740a7b925876063 | 437 | py | Python | compiled/construct/nav_parent_switch.py | smarek/ci_targets | c5edee7b0901fd8e7f75f85245ea4209b38e0cb3 | [
"MIT"
] | 4 | 2017-04-08T12:55:11.000Z | 2020-12-05T21:09:31.000Z | compiled/construct/nav_parent_switch.py | smarek/ci_targets | c5edee7b0901fd8e7f75f85245ea4209b38e0cb3 | [
"MIT"
] | 7 | 2018-04-23T01:30:33.000Z | 2020-10-30T23:56:14.000Z | compiled/construct/nav_parent_switch.py | smarek/ci_targets | c5edee7b0901fd8e7f75f85245ea4209b38e0cb3 | [
"MIT"
] | 6 | 2017-04-08T11:41:14.000Z | 2020-10-30T22:47:31.000Z | from construct import *
from construct.lib import *
nav_parent_switch__element_1 = Struct(
'foo' / Int8ub,
'subelement' / LazyBound(lambda: nav_parent_switch__subelement_1),
)
nav_parent_switch__subelement_1 = Struct(
'bar' / If(this._.foo == 66, Int8ub),
)
nav_parent_switch = Struct(
'category' / Int8ub,
'content' / Switch(this.category, {1: LazyBound(lambda: nav_parent_switch__element_1), }),
)
_schema = nav_parent_switch
| 23 | 91 | 0.750572 |
ace64453f94b2b165e06adc38398955947e33ccc | 380 | py | Python | note/migrations/0007_note_is_archived.py | triedapps/note-web-app | 423884aa7a2a406e8c647f79b542efc4c017b495 | [
"MIT"
] | 6 | 2020-10-24T03:08:43.000Z | 2022-02-17T15:20:43.000Z | note/migrations/0007_note_is_archived.py | triedapps/note-web-app | 423884aa7a2a406e8c647f79b542efc4c017b495 | [
"MIT"
] | null | null | null | note/migrations/0007_note_is_archived.py | triedapps/note-web-app | 423884aa7a2a406e8c647f79b542efc4c017b495 | [
"MIT"
] | 4 | 2020-12-14T08:30:58.000Z | 2022-02-04T07:10:36.000Z | # Generated by Django 3.1 on 2020-10-15 10:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('note', '0006_note_datetime'),
]
operations = [
migrations.AddField(
model_name='note',
name='is_archived',
field=models.BooleanField(default=False),
),
]
| 20 | 53 | 0.594737 |
ace645b5622fd9d6e609bc871af09a3047be5e53 | 17,152 | py | Python | ccvpn/views/account.py | CCrypto/ccvpn | 6bbfd01f41816bea905518f302f4cec474fdd221 | [
"MIT"
] | 81 | 2015-03-07T20:26:55.000Z | 2016-05-16T10:22:05.000Z | ccvpn/views/account.py | CCrypto/ccvpn2 | 6bbfd01f41816bea905518f302f4cec474fdd221 | [
"MIT"
] | 1 | 2017-09-21T15:56:31.000Z | 2017-11-30T15:10:56.000Z | ccvpn/views/account.py | CCrypto/ccvpn | 6bbfd01f41816bea905518f302f4cec474fdd221 | [
"MIT"
] | 20 | 2015-03-07T22:36:46.000Z | 2016-04-23T22:47:12.000Z | import datetime
from urllib.parse import urlencode
import transaction
from sqlalchemy import func
from sqlalchemy.orm.exc import NoResultFound
from pyramid.view import view_config, forbidden_view_config
from pyramid.renderers import render, render_to_response
from pyramid.httpexceptions import (
HTTPSeeOther, HTTPMovedPermanently,
HTTPBadRequest, HTTPNotFound, HTTPForbidden, HTTPFound
)
from pyramid_mailer import get_mailer
from pyramid_mailer.message import Message
from ccvpn.models import (
DBSession,
User, Profile, PasswordResetToken, Gateway, VPNSession, Order,
random_access_token
)
# Set in __init__.py from app settings
openvpn_gateway = ''
openvpn_ca = ''
@forbidden_view_config()
def forbidden(request):
    """Forbidden view: anonymous users are redirected to the login page,
    authenticated users get a real 403 response.

    (Removed an unused `_ = request.translate` local — nothing in this
    view was translated.)
    """
    if not request.user:
        return HTTPFound(location=request.route_url('account_login'))
    return HTTPForbidden()
@view_config(route_name='account_login', renderer='login.mako')
def login(request):
    """Log a user in.

    GET renders the empty login form; POST checks the credentials,
    stores the user id in the session and redirects to the account page.
    Failed attempts re-render the form with an error flash message.
    """
    _ = request.translate
    if request.method != 'POST':
        return {}
    username = request.POST.get('username')
    password = request.POST.get('password')
    # Missing fields: re-render the form with a 400 status.
    if not username or not password:
        request.response.status_code = HTTPBadRequest.code
        return {}
    user = DBSession.query(User).filter_by(username=username).first()
    # Same response for unknown user, disabled account and wrong
    # password, so the form does not leak which usernames exist.
    if not user or not user.is_active or not user.check_password(password):
        request.response.status_code = HTTPForbidden.code
        request.messages.error(_('Invalid username or password.'))
        return {}
    user.last_login = datetime.datetime.now()
    request.session['uid'] = user.id
    request.messages.info(_('Logged in.'))
    return HTTPSeeOther(location=request.route_url('account'))
@view_config(route_name='account_logout', permission='logged')
def logout(request):
    """Drop the session's user id (if any) and redirect to the home page."""
    _ = request.translate
    if 'uid' in request.session:
        del request.session['uid']
        request.messages.info(_('Logged out.'))
    return HTTPSeeOther(location=request.route_url('home'))
@view_config(route_name='account_signup', renderer='signup.mako')
def signup(request):
    """Account creation form.

    POST validates username/password/e-mail, checks uniqueness, creates
    the User plus its default (unnamed) Profile in one transaction, logs
    the new user in and redirects to the account page.  Validation
    failures re-render the form (HTTP 400) with the entered values.
    """
    ## TODO: seriously needs refactoring
    _ = request.translate
    if request.method != 'POST':
        return {}
    errors = []
    try:
        username = request.POST.get('username')
        password = request.POST.get('password')
        password2 = request.POST.get('password2')
        email = request.POST.get('email')
        if not User.validate_username(username):
            errors.append(_('Invalid username.'))
        if not User.validate_password(password):
            errors.append(_('Invalid password.'))
        if email and not User.validate_email(email):
            errors.append(_('Invalid email address.'))
        if password != password2:
            errors.append(_('Both passwords do not match.'))
        # assert is (ab)used as control flow: any accumulated error jumps
        # to the AssertionError handler below.
        assert not errors
        used = User.is_used(username, email)
        if used[0] > 0:
            errors.append(_('Username already registered.'))
        if used[1] > 0 and email:
            errors.append(_('E-mail address already registered.'))
        assert not errors
        with transaction.manager:
            u = User(username=username, email=email, password=password)
            if request.referrer:
                u.referrer_id = request.referrer.id
            DBSession.add(u)
            # Flush so u.id is assigned before creating the profile.
            DBSession.flush()
            # Every account gets a default, unnamed profile.
            dp = Profile(uid=u.id, name='')
            DBSession.add(dp)
        request.session['uid'] = u.id
        return HTTPSeeOther(location=request.route_url('account'))
    except AssertionError:
        for error in errors:
            request.messages.error(error)
    # Re-render the form with the previously entered values.
    fields = ('username', 'password', 'password2', 'email')
    request.response.status_code = HTTPBadRequest.code
    return {k: request.POST[k] for k in fields}
@view_config(route_name='account_forgot', renderer='forgot_password.mako')
def forgot(request):
    """Password reset request form.

    POST with a known username that has an e-mail address creates a
    PasswordResetToken and mails the reset link to the user; otherwise
    the form is re-rendered with an error (HTTP 400).
    """
    _ = request.translate
    if request.method != 'POST' or 'username' not in request.POST:
        return {}
    u = DBSession.query(User) \
        .filter_by(username=request.POST['username']) \
        .first()
    if not u:
        request.messages.error(_('Unknown username.'))
        request.response.status_code = HTTPBadRequest.code
        return {}
    if not u.email:
        request.messages.error(_('No e-mail address associated with username.'))
        request.response.status_code = HTTPBadRequest.code
        return {}
    token = PasswordResetToken(u)
    DBSession.add(token)
    # Flush so token.token is available for the mail body below.
    DBSession.flush()
    mailer = get_mailer(request)
    # Include the requester's IP in the mail so users can spot abuse.
    body = render('mail/password_reset.mako', {
        'user': u,
        'requested_by': request.remote_addr,
        'url': request.route_url('account_reset', token=token.token)
    }, request=request)
    message = Message(subject=_('CCVPN: Password reset request'),
                      recipients=[u.email],
                      body=body)
    mailer.send(message)
    request.messages.info(_('We sent a reset link. Check your emails.'))
    return {}
@view_config(route_name='account_reset', renderer='reset_password.mako')
def reset(request):
    """Password reset form reached from the e-mailed token link.

    GET (or an incomplete POST) renders the form; a valid POST changes
    the password, notifies the user by mail, deletes the single-use
    token and redirects to the login page.
    """
    _ = request.translate
    token = DBSession.query(PasswordResetToken) \
        .filter_by(token=request.matchdict['token']) \
        .first()
    if not token or not token.user:
        request.messages.error(_('Unknown password reset token.'))
        url = request.route_url('account_forgot')
        return HTTPMovedPermanently(location=url)
    password = request.POST.get('password')
    password2 = request.POST.get('password2')
    if request.method != 'POST' or not password or not password2:
        return {'token': token}
    if not User.validate_password(password) or password != password2:
        request.messages.error(_('Invalid password.'))
        request.response.status_code = HTTPBadRequest.code
        return {'token': token}
    token.user.set_password(password)
    # Notify the user that the password changed, including the changer's
    # IP so a hijacked reset can be spotted.
    mailer = get_mailer(request)
    body = render('mail/password_reset_done.mako', {
        'user': token.user,
        'changed_by': request.remote_addr,
    }, request=request)
    message = Message(subject=_('CCVPN: Password changed'),
                      recipients=[token.user.email],
                      body=body)
    mailer.send(message)
    msg = _('You have changed the password for ${user}.',
            mapping={'user': token.user.username})
    msg += ' ' + _('You can now log in.')
    request.messages.info(msg)
    # The token is single-use: remove it.
    DBSession.delete(token)
    url = request.route_url('account_login')
    return HTTPMovedPermanently(location=url)
@view_config(route_name='account_redirect')
def account_redirect(request):
    """Permanently redirect the legacy account URL to the account page."""
    # (Removed an unused `_ = request.translate` local: no message is
    # ever translated here.)
    return HTTPMovedPermanently(location=request.route_url('account'))
@view_config(route_name='config', permission='logged')
@view_config(route_name='config_profile', permission='logged')
def config(request):
    """Generate and serve the OpenVPN client configuration for a profile.

    The profile is addressed as /<username>/<profile-name>; only the
    currently logged-in user may fetch their own profiles.  The rendered
    .ovpn file is served as plain text when ``?plain`` is passed,
    otherwise with the OpenVPN-profile MIME type (download).
    """
    _ = request.translate
    settings = request.registry.settings
    # Gateways are addressed as <prefix><domain>; make sure the domain
    # starts with a dot so the remote host name is well-formed.
    domain = settings.get('net_domain', '')
    if not domain.startswith('.'):
        domain = '.' + domain
    try:
        username = request.matchdict['username']
        pname = request.matchdict.get('pname', '')
        if request.user.username != username:
            # Only allow currently logged user for now
            raise ValueError()
        user = request.user
        profile = DBSession.query(Profile) \
            .filter_by(uid=user.id) \
            .filter_by(name=pname) \
            .one()
    except (ValueError, KeyError, NoResultFound):
        return HTTPNotFound()
    # NOTE(review): removed two pieces of dead code from the original —
    # an unused query of all gateway countries (a pointless DB round
    # trip) and an unused "default OS" fallback local.  The template
    # receives the raw profile values.
    params = {
        'profile': profile,
        'remote': profile.get_vpn_remote(domain),
        'use_fragment': (profile.protocol == 'udpl'),
        'use_ipv6': (profile.client_os != 'freebox') and not profile.disable_ipv6,
        'use_http_proxy': (profile.protocol == 'tcp'),
        'use_resolvconf': (profile.client_os == 'ubuntu'),
        'openvpn_ca': openvpn_ca,
    }
    r = render_to_response('config.ovpn.mako', params, request=request)
    if 'plain' in request.GET:
        r.content_type = 'text/plain'
    else:
        r.content_type = 'application/x-openvpn-profile'
    return r
@view_config(route_name='account', permission='logged',
             renderer='account/index.mako')
def account(request):
    """Render the account dashboard: referral link, pre-filled tweet
    link and the user's OpenVPN profiles.

    (Removed an unreachable ``return {}`` that followed the real
    return statement.)
    """
    _ = request.translate
    ref_url = 'https://vpn.ccrypto.org/?ref=' + str(request.user.id)
    # Pre-filled "share" tweet pointing at the referral URL.
    twitter_url = 'https://twitter.com/intent/tweet?'
    twitter_args = {
        'text': _('Awesome VPN! 2€/0.005BTC per month, with a free 7 days trial!'),
        'via': 'CCrypto_VPN',
        'url': ref_url,
        'related': 'CCrypto_VPN,CCrypto_org'
    }
    profiles_limit = 10
    return {
        'ref_url': ref_url,
        'twitter_link': twitter_url + urlencode(twitter_args),
        'profiles': request.user.profiles,
        'limit': profiles_limit,
    }
@view_config(route_name='account', permission='logged',
             request_method='POST')
def account_post(request):
    """Handle dashboard POSTs: create a new profile or delete one.

    ``profilename`` creates a profile (subject to name validation,
    uniqueness and a per-user limit) and redirects to its edit page;
    ``delete`` removes a named profile.  Everything else redirects back
    to the dashboard.
    """
    _ = request.translate
    redirect = HTTPSeeOther(location=request.route_url('account'))
    profiles_limit = 10
    profile_name = request.POST.get('profilename')
    profile_delete = request.POST.get('delete')
    if profile_name:
        p = Profile()
        if not p.validate_name(profile_name):
            request.messages.error(_('Invalid name.'))
            return redirect
        # Check if the name is already used
        used = DBSession.query(Profile).filter_by(uid=request.user.id) \
            .filter_by(name=profile_name).first()
        if used:
            request.messages.error(_('Name already used.'))
            return redirect
        # Check if this user's under the profile number limit
        profiles_count = DBSession.query(func.count(Profile.id)) \
            .filter_by(uid=request.user.id).scalar()
        # NOTE(review): ">" allows one more profile than profiles_limit,
        # and the count includes the default unnamed profile — confirm
        # whether ">=" was intended.
        if profiles_count > profiles_limit:
            request.messages.error(_('You have too many profiles.'))
            return redirect
        p.name = profile_name
        p.uid = request.user.id
        DBSession.add(p)
        DBSession.flush()
        # Jump straight to the new profile's edit page.
        return HTTPSeeOther(location=request.route_url('account_profiles_edit', id=p.id))
    if profile_delete:
        try:
            profile_delete = int(profile_delete)
        except ValueError:
            return redirect
        # The default (unnamed) profile cannot be deleted: name != ''.
        p = DBSession.query(Profile) \
            .filter_by(id=int(profile_delete)) \
            .filter(Profile.name != '') \
            .filter_by(uid=request.user.id) \
            .first()
        if not p:
            request.messages.error(_('Unknown profile.'))
            return redirect
        DBSession.delete(p)
    return redirect
@view_config(route_name='account_profiles_edit', permission='logged',
             renderer='account/profiles_edit.mako')
def profiles_edit(request):
    """Render the edit form for one of the logged-in user's profiles."""
    _ = request.translate
    try:
        pid = int(request.matchdict['id'])
        profile = DBSession.query(Profile) \
            .filter_by(id=pid) \
            .filter_by(uid=request.user.id) \
            .one()
    except (KeyError, ValueError, NoResultFound):
        # Bad id or not this user's profile: back to the dashboard.
        return HTTPSeeOther(location=request.route_url('account'))
    countries = {row[0] for row in DBSession.query(Gateway.country).all()}
    return {
        'profile': profile,
        'edit_post_url': request.route_url('account_profiles_edit', id=profile.id),
        'gw_countries': countries,
    }
@view_config(route_name='account_profiles_edit', permission='logged',
             request_method='POST')
def profiles_edit_post(request):
    """Apply the profile edit form: rename, client OS, protocol, IPv6,
    HTTP proxy and gateway selection."""
    _ = request.translate
    try:
        profile_id = int(request.matchdict['id'])
        profile = DBSession.query(Profile).filter_by(id=profile_id) \
            .filter_by(uid=request.user.id).one()
    except (KeyError, ValueError, NoResultFound):
        return HTTPSeeOther(location=request.route_url('account'))
    redirect = HTTPSeeOther(location=request.route_url('account_profiles_edit',
                                                       id=profile.id))
    try:
        name = request.POST.get('name', '')
        client_os = request.POST['client_os']
        gateway = request.POST['gateway']
        protocol = request.POST['protocol']
        # Checkbox semantics: an absent box means IPv6 disabled.
        disable_ipv6 = 'enable_ipv6' not in request.POST
        http_proxy = request.POST.get('use_http_proxy', '')
    except (KeyError, ValueError):
        return redirect
    if protocol not in Profile.PROTOCOLS:
        return redirect
    # Renaming is only allowed for named (non-default) profiles.
    if profile.name and name and name != profile.name:
        if not profile.validate_name(name):
            request.messages.error(_('Invalid name.'))
            return redirect
        # Check if the name is already used
        used = DBSession.query(Profile).filter_by(uid=request.user.id) \
            .filter_by(name=name).first()
        if used:
            request.messages.error(_('Name already used.'))
            return redirect
        profile.name = name
    profile.client_os = client_os
    profile.protocol = protocol
    profile.disable_ipv6 = disable_ipv6
    profile.use_http_proxy = http_proxy
    if gateway.startswith('rr_') and len(gateway) == 5:
        # rr_<cc> # random in country
        cc = gateway[3:]
        profile.gateway_country = cc
        profile.gateway_id = None
    else:
        # random
        # NOTE(review): any other value falls back to a fully random
        # gateway; picking a specific gateway id is not handled here.
        profile.gateway_country = None
        profile.gateway_id = None
    request.messages.info(_('Saved!'))
    return HTTPSeeOther(location=request.route_url('account'))
@view_config(route_name='account_settings', permission='logged',
             renderer='account/settings.mako')
def settings(request):
    """Render the account settings form; all data comes from the template."""
    return dict()
@view_config(route_name='account_settings', permission='logged',
             request_method='POST')
def settings_post(request):
    """Handle the account settings form (change password and/or e-mail).

    Invalid fields are reported via flash messages and skipped; the
    request always redirects back to the settings page.
    """
    _ = request.translate
    try:
        password = request.POST['password']
        password2 = request.POST['password2']
        email = request.POST['email']
    # BUG FIX: was a bare `except:` which also swallowed unrelated
    # errors (even KeyboardInterrupt); only missing form fields can
    # raise here.
    except KeyError:
        return HTTPSeeOther(location=request.route_url('account_settings'))
    if password and not request.user.validate_password(password):
        request.messages.error(_('Invalid password.'))
        password = ''
    if password and password != password2:
        request.messages.error(_('Both passwords do not match.'))
        password = ''
    if email and not request.user.validate_email(request.POST['email']):
        request.messages.error(_('Invalid email address.'))
        email = ''
    if email:
        # we do not count the current user (because of pre-filled forms)
        c = DBSession.query(func.count(User.id)).filter_by(email=email) \
            .filter(User.id != request.user.id).scalar()
        if c > 0:
            request.messages.error(_('E-mail address already registered.'))
            email = ''
    if password:
        request.user.set_password(password)
    if email:
        request.user.email = email
    if password or email:
        request.messages.info(_('Saved!'))
    return HTTPSeeOther(location=request.route_url('account_settings'))
@view_config(route_name='account_orders', permission='logged',
             renderer='account/orders.mako')
def orders(request):
    """List the logged-in user's visible orders, 100 per page.

    Shown are orders that are paid, partially paid, or still open
    (close date in the future); fully expired unpaid orders are hidden.
    """
    items_count = DBSession.query(func.count(Order.id)) \
        .filter_by(uid=request.user.id) \
        .filter(Order.close_date != None) \
        .scalar()
    page_items = 100
    pages = int((items_count / page_items) + 0.5)
    try:
        page = int(request.GET['page'])
    except (KeyError, ValueError):
        page = 0
    offset = page * page_items
    # BUG FIX: the query assignment ended with a stray line-continuation
    # backslash, which glued the next statement onto it and made the
    # function a syntax error.
    orders = DBSession.query(Order).filter_by(uid=request.user.id)
    now = datetime.datetime.now()
    orders = orders.filter(
        (Order.paid == True) |  # Order is paid
        ((Order.paid == False) & (Order.close_date > now)) |  # Or not expired
        ((Order.paid == False) & (Order.paid_amount > 0))  # Or not fully paid
    )
    orders = orders.order_by(Order.start_date.desc())
    orders = orders.limit(page_items).offset(offset).all()
    return {'orders': orders, 'page': page, 'pages': pages}
@view_config(route_name='account_logs', permission='logged',
             renderer='account/logs.mako')
def logs(request):
    """Show the logged-in user's VPN session history, 100 rows per page."""
    page_items = 100
    total = DBSession.query(func.count(VPNSession.id)) \
        .filter_by(user_id=request.user.id) \
        .scalar()
    pages = int((total / page_items) + 0.5)
    try:
        page = int(request.GET['page'])
    except (KeyError, ValueError):
        page = 0
    sessions = DBSession.query(VPNSession) \
        .filter_by(user_id=request.user.id) \
        .order_by(VPNSession.connect_date.desc()) \
        .limit(page_items).offset(page * page_items)
    return {'logs': sessions, 'page': page, 'pages': pages}
| 33.5 | 89 | 0.631763 |
ace645b8d891cda86b12b05dcd0293f03d3df125 | 3,652 | py | Python | value.py | QHedgeTech/witables | 4e74400eda9b17c05d4b72cad32b19f60e3c3d6b | [
"Apache-2.0"
] | 4 | 2015-01-15T12:48:14.000Z | 2016-03-28T06:46:50.000Z | value.py | QHedgeTech/witables | 4e74400eda9b17c05d4b72cad32b19f60e3c3d6b | [
"Apache-2.0"
] | null | null | null | value.py | QHedgeTech/witables | 4e74400eda9b17c05d4b72cad32b19f60e3c3d6b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Store the filepath for file manipulation
import os
filepath = os.path.abspath(os.path.dirname(__file__))
# Add the file path to the system path to import framework file.
import sys
if filepath not in sys.path:
sys.path.append(filepath)
# Debug Module.
import cgitb
cgitb.enable()
# Framework module
from framework import *
def errorModal(activeRow, activeCol, msg):
    """Return UTF-8 encoded HTML for a Bootstrap modal reporting that
    the table value at (activeRow, activeCol) could not be loaded."""
    html = '''
	<div class="modal-header">
		<button type="button" class="close" data-dismiss="modal" aria-hidden="true">×</button>
		<h4 class="modal-title">Error loading value row: %s, col: %s</h4>
	</div>
	<div class="modal-body">
		<div class="te">%s</div>
	</div>
	<div class="modal-footer">
		<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
	</div>''' % (activeRow, activeCol, msg)
    # Encode to bytes for the WSGI response body.
    return html.encode('utf-8')
def makeBody(activeFile, activeNode, activeRow, activeCol):
    """Return the UTF-8 HTML body of a modal showing one table cell.

    Parameters are the HDF5 file name (relative to the database
    directory), the node path inside the file, and the row index /
    column name of the value to display.  A non-Table node yields an
    error modal instead.
    """
    # Open database file read-only.
    database = open_file(filepath + databaseDirectory + '/' + activeFile, mode='r')
    try:
        # Resolve the node by its path.
        table = database.get_node(activeNode)
        # Only Table nodes support row/column addressing.
        if type(table) is not Table:
            # BUG FIX: the message arguments were swapped (file where the
            # node belonged and vice versa).
            return errorModal(activeRow, activeCol,
                              "Error while loading node %s in file %s" % (activeNode, activeFile))
        # Read the requested cell.
        value = table[int(activeRow)][activeCol]
    finally:
        # BUG FIX: the handle used to leak on the non-Table error path.
        database.close()
    output = '''
	<div class="modal-header">
		<button type="button" class="close" data-dismiss="modal" aria-hidden="true">×</button>
		<h4 class="modal-title">%s%s row:%s col:%s</h4>
	</div>
	<div class="modal-body">
		<div class="te">%s</div>
	</div>
	<div class="modal-footer">
		<button type="button" class="btn btn-default" data-dismiss="modal">Close</button>
	</div>''' % (activeFile, activeNode, activeRow, activeCol, value)
    # Encode to bytes for the WSGI response body.
    return output.encode('utf-8')
def application(environ, start_response):
    """WSGI entry point: validate the query string and serve the modal
    body for the requested table cell.

    Required query parameters: ``file``, ``path``, ``row``, ``col``.
    Each missing parameter produces its own error page with a usage
    example.
    """
    # Process the parameters if any.
    parameters = parse_qs(environ.get('QUERY_STRING', ''))
    # Check the HDF5 file name is present.
    if 'file' not in parameters.keys():
        return errorPage('Missing argument. Value page needs a filename argument.<br>Example: value?file=test.h5&path=/foo/bar&row=0&col=chips', start_response)
    # Check the node path is present.
    if 'path' not in parameters.keys():
        return errorPage('Missing argument. Value page needs a path argument that contains the path of the node.<br>Example: value?file=test.h5&path=/foo/bar&row=0&col=chips', start_response)
    # Check the row number is present.
    if 'row' not in parameters.keys():
        return errorPage('Missing argument. Value page needs a row argument that contains the row number in table.<br>Example: value?file=test.h5&path=/foo/bar&row=0&col=chips', start_response)
    # Check the column name is present.
    if 'col' not in parameters.keys():
        return errorPage('Missing argument. Value page needs a "col" argument that contains the row number in table.<br>Example: value?file=test.h5&path=/foo/bar&row=0&col=chips', start_response)
    # parse_qs returns lists; take the first value of each parameter.
    # Get the filename
    activeFile = parameters['file'][0]
    # Get the table path
    activeNode = parameters['path'][0]
    # Get the row number
    activeRow = parameters['row'][0]
    # Get the column name
    activeCol = parameters['col'][0]
    # Make answer header.
    status = '200 OK'
    response_headers = [('Content-Type', 'text/html; charset=utf-8')]
    start_response(status, response_headers)
    # Return it.
    return [makeBody(activeFile,activeNode,activeRow,activeCol)]
#print makeBody('test.h5','/Economics/GDPEUESTQUOGLD','0','QuoteDate')
| 30.433333 | 189 | 0.717689 |
ace646214dfe4e0c95d8b8fc0812ad5ab9a75433 | 344 | py | Python | src/pcc/api/instance/delete_instance.py | ymstmsys/pcc-cli | 8805ecaa39c99092352e22986fbf7dba9bac4efb | [
"Apache-2.0"
] | null | null | null | src/pcc/api/instance/delete_instance.py | ymstmsys/pcc-cli | 8805ecaa39c99092352e22986fbf7dba9bac4efb | [
"Apache-2.0"
] | 1 | 2016-07-22T05:22:32.000Z | 2016-07-22T05:29:16.000Z | src/pcc/api/instance/delete_instance.py | ymstmsys/pcc-cli | 8805ecaa39c99092352e22986fbf7dba9bac4efb | [
"Apache-2.0"
] | 1 | 2019-07-24T05:34:57.000Z | 2019-07-24T05:34:57.000Z | # -*- coding: utf-8 -*-
def command():
    """Name of the sub-command this module implements."""
    return "delete-instance"
def init_argument(parser):
    """Register this sub-command's required --instance-no option."""
    parser.add_argument("--instance-no", required=True)
def execute(requester, args):
    """Issue the DeleteInstance API call for the instance named in args."""
    params = {"InstanceNo": args.instance_no}
    return requester.execute("/DeleteInstance", params)
| 21.5 | 59 | 0.69186 |
ace6463c7e34a36568384647fe1487897a883414 | 30,336 | py | Python | test/functional/test_runner.py | nicholascioli/Ravencoin | 0dba04586454b03d4e596cabecdc7626393dd197 | [
"MIT"
] | null | null | null | test/functional/test_runner.py | nicholascioli/Ravencoin | 0dba04586454b03d4e596cabecdc7626393dd197 | [
"MIT"
] | null | null | null | test/functional/test_runner.py | nicholascioli/Ravencoin | 0dba04586454b03d4e596cabecdc7626393dd197 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Copyright (c) 2017-2020 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:RavenTestFramework.main`.
"""
from collections import deque
import argparse
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
# Colors default to "no formatting" and are only enabled on POSIX below.
BOLD, GREEN, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
    # Make sure python thinks it can write unicode to its stdout
    "\u2713".encode("utf_8").decode(sys.stdout.encoding)
    TICK = "✓ "
    CROSS = "✖ "
    CIRCLE = "○ "
    DASH = "- "
except UnicodeDecodeError:
    # Terminal encoding cannot represent the symbols: fall back to ASCII.
    TICK = "P "
    CROSS = "x "
    CIRCLE = "o "
    DASH = "- "
if os.name == 'posix':
    # primitive formatting on supported
    # terminal via ANSI escape sequences:
    BOLD = ('\033[0m', '\033[1m')
    GREEN = ('\033[0m', '\033[0;32m')
    RED = ('\033[0m', '\033[0;31m')
    GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
EXTENDED_SCRIPTS = [
# These tests are not run by the build process.
# Longest test should go first, to favor running tests in parallel
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv Tests less than 20m vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
'feature_fee_estimation.py',
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv Tests less than 5m vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
'feature_dbcrash.py',
]
BASE_SCRIPTS= [
# Scripts that are run by the build process.
# Longest test should go first, to favor running tests in parallel
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv Tests less than 2m vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
'wallet_backup.py',
'wallet_hd.py',
'p2p_timeouts.py',
'mining_getblocktemplate_longpoll.py',
'feature_maxuploadtarget.py',
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv Tests less than 45s vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
'rpc_fundrawtransaction.py',
'wallet_create_tx.py',
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv Tests less than 30s vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
'feature_rewards.py',
'wallet_basic.py',
'mempool_limit.py',
'feature_assets.py',
'feature_messaging.py',
'feature_assets_reorg.py',
'feature_assets_mempool.py',
'feature_restricted_assets.py',
'feature_raw_restricted_assets.py',
'wallet_bip44.py',
'mining_prioritisetransaction.py',
'feature_maxreorgdepth.py 4 --height=60 --tip_age=0 --should_reorg=0', # Don't Reorg
'feature_maxreorgdepth.py 3 --height=60 --tip_age=0 --should_reorg=1', # Reorg (low peer count)
'feature_maxreorgdepth.py 4 --height=60 --tip_age=43400 --should_reorg=1', # Reorg (not caught up)
'feature_maxreorgdepth.py 4 --height=59 --tip_age=0 --should_reorg=1', # Reorg (<60)
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv Tests less than 15s vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
'rpc_rawtransaction.py',
'rpc_addressindex.py',
'wallet_dump.py',
'mempool_persist.py',
'rpc_timestampindex.py',
'wallet_listreceivedby.py',
'wallet_reorgsrestore.py',
'interface_rest.py',
'wallet_keypool_topup.py',
'wallet_import_rescan.py',
'wallet_abandonconflict.py',
'wallet_groups.py',
'rpc_blockchain.py',
'p2p_feefilter.py',
'p2p_leak.py',
'feature_versionbits_warning.py',
'rpc_spentindex.py',
'feature_rawassettransactions.py',
'feature_sweep.py',
'wallet_importmulti.py',
'wallet_labels.py',
'wallet_import_with_label.py',
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv Tests less than 5s vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
'wallet_listtransactions.py',
'feature_minchainwork.py',
'wallet_encryption.py',
'feature_listmyassets.py',
'mempool_reorg.py',
'rpc_txoutproof.py',
'feature_reindex.py',
'rpc_decodescript.py',
'wallet_keypool.py',
'rpc_setban.py',
'wallet_listsinceblock.py',
'wallet_zapwallettxes.py',
'wallet_multiwallet.py',
'interface_zmq.py',
'rpc_invalidateblock.py',
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv Tests less than 3s vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
'rpc_getchaintips.py',
'wallet_txn_clone.py',
'wallet_txn_doublespend.py --mineblock',
'feature_uacomment.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_txindex.py',
'p2p_disconnect_ban.py',
'wallet_importprunedfunds.py',
'rpc_bind.py',
'feature_unique_assets.py',
'rpc_preciousblock.py',
'feature_notifications.py',
'rpc_net.py',
'rpc_misc.py',
'interface_raven_cli.py',
'mempool_resurrect.py',
'rpc_signrawtransaction.py',
'wallet_resendtransactions.py',
'wallet_txn_clone.py --mineblock',
'interface_rpc.py',
'rpc_signmessage.py',
'rpc_deprecated.py',
'wallet_coinbase_category.py',
'wallet_txn_doublespend.py',
'feature_shutdown.py',
'wallet_disable.py',
'interface_http.py',
'mempool_spend_coinbase.py',
'feature_bip68_sequence.py',
'p2p_mempool.py',
'rpc_named_arguments.py',
'rpc_uptime.py',
'rpc_assettransfer.py',
'feature_loadblock.py',
'p2p_leak_tx.py'
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
SKIPPED_TESTS = [
# List of tests that are not going to be run (usually means test is broken)
'example_test.py',
'feature_assumevalid.py',
'feature_cltv.py', #TODO - fix mininode rehash methods to use X16R
'feature_dersig.py', #TODO - fix mininode rehash methods to use X16R
'feature_nulldummy.py', #TODO - fix mininode rehash methods to use X16R
'feature_pruning.py',
'feature_rbf.py',
'feature_segwit.py', #TODO - fix mininode rehash methods to use X16R
'mempool_packages.py',
'mining_basic.py', #TODO - fix mininode rehash methods to use X16R
'p2p_compactblocks.py', #TODO - refactor to assume segwit is always active
'p2p_fingerprint.py', #TODO - fix mininode rehash methods to use X16R
'p2p_segwit.py', #TODO - refactor to assume segwit is always active
'p2p_sendheaders.py', #TODO - fix mininode rehash methods to use X16R
'p2p_unrequested_blocks.py',
'wallet_bumpfee.py', #TODO - Now fails because we removed RBF
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
    """Entry point: parse command-line options, assemble the list of
    functional test scripts and hand them to run_tests(), repeating the
    whole run --loop times."""
    # Parse arguments and pass through unrecognised args
    parser = argparse.ArgumentParser(add_help=False, usage='%(prog)s [test_runner.py options] [script options] [scripts]', description=__doc__,
                                     epilog='Help text and arguments for individual test script:', formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--ansi', action='store_true', default=sys.stdout.isatty(), help='Use ANSI colors and dots in output (enabled by default when standard output is a TTY)')
    parser.add_argument('--combinedlogslen', type=int, default=0, metavar='n', help='On failure, print a log (of length n lines) to the console, combined from the test framework and all test nodes.')
    parser.add_argument('--coverage', action='store_true', help='Generate a basic coverage report for the RPC interface.')
    parser.add_argument('--exclude', metavar='', help='Specify a comma-separated-list of scripts to exclude.')
    parser.add_argument('--extended', action='store_true', help='Run the extended test suite in addition to the basic tests.')
    parser.add_argument('--failfast', action='store_true', help='Stop execution after the first test failure.')
    parser.add_argument('--filter', metavar='', help='Filter scripts to run by regular expression.')
    parser.add_argument('--force', action='store_true', help='Run tests even on platforms where they are disabled by default (e.g. windows).')
    parser.add_argument('--help', action='store_true', help='Print help text and exit.')
    parser.add_argument('--jobs', type=int, metavar='', default=get_cpu_count(), help='How many test scripts to run in parallel. Default=.' + str(get_cpu_count()))
    parser.add_argument('--keepcache', action='store_true', help='The default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous test-run.')
    parser.add_argument('--list', action='store_true', help='Print list of tests and exit.')
    parser.add_argument('--loop', type=int, metavar='n', default=1, help='Run(loop) the tests n number of times.')
    # NOTE(review): --onlyextended is accepted but never consulted
    # anywhere in this function — confirm whether it should restrict
    # the test list to EXTENDED_SCRIPTS.
    parser.add_argument('--onlyextended', action='store_true', help='Run only the extended test suite.')
    parser.add_argument('--quiet', action='store_true', help='Only print results summary and failure logs.')
    parser.add_argument('--tmpdirprefix', metavar='', default=tempfile.gettempdir(), help='Root directory for data.')
    # Setup colours for ANSI terminals
    args, unknown_args = parser.parse_known_args()
    if not args.ansi:
        global BOLD, GREEN, RED, GREY
        BOLD = ("", "")
        GREEN = ("", "")
        RED = ("", "")
        GREY = ("", "")
    # args to be passed on always start with two dashes; tests are the remaining unknown args
    tests = [arg for arg in unknown_args if arg[:2] != "--"]
    pass_on_args = [arg for arg in unknown_args if arg[:2] == "--"]
    # Read config generated by configure.
    config = configparser.ConfigParser()
    configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
    config.read_file(open(configfile, encoding="utf8"))
    pass_on_args.append("--configfile=%s" % configfile)
    # Set up logging
    logging_level = logging.INFO if args.quiet else logging.DEBUG
    logging.basicConfig(format='%(message)s', level=logging_level)
    # Create base test directory
    tmpdir = "%s/raven_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
    os.makedirs(tmpdir)
    logging.debug("Temporary test directory at %s" % tmpdir)
    # Don't run tests on Windows by default
    if config["environment"]["EXEEXT"] == ".exe" and not args.force:
        # https://github.com/RavenProject/Ravencoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
        # https://github.com/RavenProject/Ravencoin/pull/5677#issuecomment-136646964
        print("Tests currently disabled on Windows by default. Use --force option to enable")
        sys.exit(0)
    # Check that the build was configured with wallet, utils, and ravend
    enable_wallet = config["components"].getboolean("ENABLE_WALLET")
    enable_cli = config["components"].getboolean("ENABLE_UTILS")
    enable_ravend = config["components"].getboolean("ENABLE_RAVEND")
    if not (enable_wallet and enable_cli and enable_ravend):
        print("No functional tests to run. Wallet, utils, and ravend must all be enabled")
        print("Rerun `configure` with --enable-wallet, --with-cli and --with-daemon and rerun make")
        sys.exit(0)
    # Loop the running of tests
    for i in range(0, args.loop):
        print("Test Loop ", i+1, "of ", args.loop)
        # Flag marking the final iteration, forwarded to run_tests().
        last_loop = False
        if i+1 == args.loop:
            last_loop = True
        # Build list of tests
        test_list = []
        if tests:
            # Individual tests have been specified. Run specified tests that exist
            # in the ALL_SCRIPTS list. Accept names with or without a .py extension.
            # Specified tests can contain wildcards, but in that case the supplied
            # paths should be coherent, e.g. the same path as that provided to call
            # test_runner.py. Examples:
            #   `test/functional/test_runner.py test/functional/wallet*`
            #   `test/functional/test_runner.py ./test/functional/wallet*`
            #   `test_runner.py wallet*`
            # but not:
            #   `test/functional/test_runner.py wallet*`
            # Multiple wildcards can be passed:
            #   `test_runner.py tool* mempool*`
            for test in tests:
                script = test.split("/")[-1]
                script = script + ".py" if ".py" not in script else script
                if script in ALL_SCRIPTS:
                    test_list.append(script)
                else:
                    print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
        elif args.extended:
            # Include extended tests
            test_list += ALL_SCRIPTS
        else:
            # Run base tests only
            test_list += BASE_SCRIPTS
        # Remove the test cases that the user has explicitly asked to exclude.
        if args.exclude:
            exclude_tests = [test.split('.py')[0] for test in args.exclude.split(',')]
            for exclude_test in exclude_tests:
                # Remove <test_name>.py and <test_name>.py --arg from the test list
                exclude_list = [test for test in test_list if test.split('.py')[0] == exclude_test]
                for exclude_item in exclude_list:
                    test_list.remove(exclude_item)
                if not exclude_list:
                    print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
        if args.filter:
            test_list = list(filter(re.compile(args.filter).search, test_list))
        if not test_list:
            print("No valid test scripts specified. Check that your test is in one "
                  "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
            sys.exit(0)
        if args.help:
            # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
            parser.print_help()
            subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
            sys.exit(0)
        if args.list:
            print(ALL_SCRIPTS)
            sys.exit(0)
        # Sanity-check the test lists against the files on disk.
        check_script_list(config["environment"]["SRCDIR"])
        check_script_prefixes()
        if not args.keepcache:
            shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
        run_tests(
            test_list=test_list,
            src_dir=config["environment"]["SRCDIR"],
            build_dir=config["environment"]["BUILDDIR"],
            exeext=config["environment"]["EXEEXT"],
            tmpdir=tmpdir,
            use_term_control=args.ansi,
            jobs=args.jobs,
            enable_coverage=args.coverage,
            args=pass_on_args,
            combined_logs_len=args.combinedlogslen,
            failfast=args.failfast,
            last_loop=last_loop
        )
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, use_term_control, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, last_loop=False):
    """Run the given functional test scripts in parallel and print a summary.

    :param test_list: test script names (optionally with arguments) to run
    :param src_dir: source tree root; scripts live in <src_dir>/test/functional/
    :param build_dir: build tree root; binaries and the test cache live here
    :param exeext: executable suffix for the platform (e.g. '' or '.exe')
    :param tmpdir: directory that receives per-test temporary directories
    :param use_term_control: whether progress dots / line clears are printed
    :param jobs: max number of tests run concurrently (capped at 13 below)
    :param enable_coverage: collect RPC coverage data via RPCCoverage
    :param args: extra flags passed through to every test script
    :param combined_logs_len: if non-zero, print that many trailing lines of
        the combined per-node logs when a test fails
    :param failfast: stop scheduling after the first failing test
    :param last_loop: if True, terminate the process with a pass/fail exit code
    """
    # Warn if ravend is already running (unix only)
    if args is None:
        args = []
    try:
        if subprocess.check_output(["pidof", "ravend"]) is not None:
            print("%sWARNING!%s There is already a ravend process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
    except (OSError, subprocess.SubprocessError):
        # `pidof` missing (non-unix) or no process found; either way carry on.
        pass
    # Warn if there is a cache directory
    cache_dir = "%s/test/cache" % build_dir
    if os.path.isdir(cache_dir):
        print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
    #Set env vars
    if "RAVEND" not in os.environ:
        os.environ["RAVEND"] = build_dir + '/src/ravend' + exeext
        os.environ["RAVENCLI"] = build_dir + '/src/raven-cli' + exeext
    tests_dir = src_dir + '/test/functional/'
    # limit number of jobs to 13
    if jobs > 13:
        jobs = 13
        print("Jobs limited to 13 threads max.")
    print("Using: ", jobs, " threads")
    flags = ["--srcdir={}/src".format(build_dir)] + args
    flags.append("--cachedir=%s" % cache_dir)
    if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
        logging.debug("Initializing coverage directory at %s" % coverage.dir)
    else:
        coverage = None
    if len(test_list) > 1 and jobs > 1:
        # Populate cache
        try:
            subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
        except subprocess.CalledProcessError as e:
            # Dump the cache-population failure verbatim before re-raising.
            print("\n----<test_runner>----\n")
            print("Error in create_cache.py:\n")
            for line in e.output.decode().split('\n'):
                print(line)
            print('\n')
            print(e.returncode)
            print('\n')
            print("\n----</test_runner>---\n")
            raise
    #Run Tests
    job_queue = TestHandler(
        num_tests_parallel=jobs,
        tests_dir=tests_dir,
        tmpdir=tmpdir,
        use_term_control=use_term_control,
        test_list=test_list,
        flags=flags
    )
    start_time = time.time()
    test_results = []
    # Widest test name, used to align the summary table columns.
    max_len_name = len(max(test_list, key=len))
    test_count = len(test_list)
    for _ in range(test_count):
        test_result, testdir, stdout, stderr = job_queue.get_next()
        test_results.append(test_result)
        done_str = "{}/{} - {}{}{}".format(_ + 1, test_count, BOLD[1], test_result.name, BOLD[0])
        if test_result.status == "Passed":
            logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
        elif test_result.status == "Skipped":
            logging.debug("%s skipped" % done_str)
        else:
            # Failure: print the captured output immediately.
            print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
            if combined_logs_len and os.path.isdir(testdir):
                # Print the final `combinedlogslen` lines of the combined logs
                print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
                print('\n============')
                print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
                print('============\n')
                combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
                if BOLD[0]:
                    combined_logs_args += ['--color']
                combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
                print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
            if failfast:
                logging.debug("Early exit after test failure...")
                break
    print_results(test_results, max_len_name, (int(time.time() - start_time)))
    if coverage:
        coverage_passed = coverage.report_rpc_coverage()
        logging.debug("Cleaning up coverage data")
        coverage.cleanup()
    else:
        coverage_passed = True
    # Clear up the temp directory if all subdirectories are gone
    if not os.listdir(tmpdir):
        os.rmdir(tmpdir)
    all_passed = all(map(lambda test_res: test_res.was_successful, test_results)) and coverage_passed
    # This will be a no-op unless failfast is True in which case there may be dangling
    # processes which need to be killed.
    job_queue.kill_and_join()
    if last_loop:
        sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
    """Sort the collected results and print an aligned summary table.

    Mutates each result's ``padding`` so its __repr__ lines up in columns,
    and appends an "ALL" row with the accumulated duration.
    """
    test_results.sort(key=TestResult.sort_key)
    parts = ["\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]]
    all_passed = True
    time_sum = 0
    for result in test_results:
        if not result.was_successful:
            all_passed = False
        time_sum += result.time
        result.padding = max_len_name
        parts.append(str(result))
    overall = TICK + "Passed" if all_passed else CROSS + "Failed"
    if not all_passed:
        parts.append(RED[1])
    parts.append(BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), overall.ljust(9), time_sum) + BOLD[0])
    if not all_passed:
        parts.append(RED[0])
    parts.append("Runtime: %s s\n" % runtime)
    print("".join(parts))
# noinspection PyTypeChecker
class TestHandler:
    """
    Trigger the test scripts passed in via the list.

    Keeps up to ``num_tests_parallel`` test subprocesses running at once and
    hands back results one at a time via get_next().
    """
    def __init__(self, num_tests_parallel, tests_dir, tmpdir, use_term_control, test_list=None, flags=None):
        assert(num_tests_parallel >= 1)
        self.num_jobs = num_tests_parallel
        self.tests_dir = tests_dir
        self.tmpdir = tmpdir
        self.use_term_control = use_term_control
        self.test_list = test_list
        self.flags = flags
        self.num_running = 0
        # Each job is a tuple: (name, start_time, Popen, test_dir, stdout_file, stderr_file).
        self.jobs = []
    def get_next(self):
        """Top up the running jobs, then block until one finishes.

        Returns a (TestResult, test_dir, stdout, stderr) tuple for the first
        subprocess that completes. Tests running longer than 20 minutes are
        sent SIGINT.
        """
        while self.num_running < self.num_jobs and self.test_list:
            # Add tests
            self.num_running += 1
            test = self.test_list.pop(0)
            # The remaining list length gives each test a distinct port seed,
            # so concurrently running tests do not collide on ports.
            port_seed = len(self.test_list)
            port_seed_arg = ["--portseed={}".format(port_seed)]
            # Spooled files stay in memory up to 64 KiB before spilling to disk.
            log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
            log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
            test_argv = test.split()
            test_dir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), port_seed)
            tmpdir_arg = ["--tmpdir={}".format(test_dir)]
            self.jobs.append((test,
                              time.time(),
                              subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + port_seed_arg + tmpdir_arg, universal_newlines=True, stdout=log_stdout, stderr=log_stderr),
                              test_dir,
                              log_stdout,
                              log_stderr))
        if not self.jobs:
            raise IndexError('pop from empty list')
        dot_count = 0
        while True:
            # Return first proc that finishes
            time.sleep(.5)
            for job in self.jobs:
                (name, start_time, proc, test_dir, log_out, log_err) = job
                if int(time.time() - start_time) > 20 * 60:
                    # Timeout individual tests after 20 minutes (to stop tests hanging and not
                    # providing useful output.
                    proc.send_signal(signal.SIGINT)
                if proc.poll() is not None:
                    # Process finished: collect its captured output.
                    log_out.seek(0), log_err.seek(0)
                    [stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
                    log_out.close(), log_err.close()
                    # Anything written to stderr counts as a failure even on a
                    # passing exit code.
                    if proc.returncode == TEST_EXIT_PASSED and stderr == "":
                        status = "Passed"
                    elif proc.returncode == TEST_EXIT_SKIPPED:
                        status = "Skipped"
                    else:
                        status = "Failed"
                    self.num_running -= 1
                    self.jobs.remove(job)
                    if self.use_term_control:
                        clear_line = '\r' + (' ' * dot_count) + '\r'
                        print(clear_line, end='', flush=True)
                    return TestResult(name, status, int(time.time() - start_time)), test_dir, stdout, stderr
            if self.use_term_control:
                # Progress indicator while waiting for any job to finish.
                print('.', end='', flush=True)
                dot_count += 1
    def kill_and_join(self):
        """Send SIGKILL to all jobs and block until all have ended."""
        process = [i[2] for i in self.jobs]
        for p in process:
            p.kill()
        for p in process:
            p.wait()
class TestResult:
    """Outcome of a single functional test: name, status and duration.

    ``status`` is one of "Passed", "Skipped" or "Failed"; ``padding`` is the
    column width used by __repr__ and is set by print_results().
    """
    def __init__(self, name, status, result_time):
        self.name = name
        self.status = status
        self.time = result_time  # duration in whole seconds
        self.padding = 0
    def sort_key(self):
        """Key for sorting: Passed, then Skipped, then Failed; alphabetical within each group."""
        if self.status == "Passed":
            rank = 0
        elif self.status == "Skipped":
            rank = 1
        elif self.status == "Failed":
            rank = 2
        else:
            # Previously an unrecognised status returned None, which made
            # list.sort(key=TestResult.sort_key) raise TypeError; rank it last.
            rank = 3
        return rank, self.name.lower()
    def __repr__(self):
        if self.status == "Passed":
            color = GREEN
            glyph = TICK
        elif self.status == "Failed":
            color = RED
            glyph = CROSS
        elif self.status == "Skipped":
            color = GREY
            glyph = CIRCLE
        else:
            color = BOLD
            glyph = DASH
        return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
    @property
    def was_successful(self):
        # Skipped tests still count as successful; only "Failed" does not.
        return self.status != "Failed"
def check_script_prefixes():
    """Check that no more than `expected_violation_count` of the
    test scripts don't start with one of the allowed name prefixes."""
    expected_violation_count = 0
    # leeway is provided as a transition measure, so that pull-requests
    # that introduce new tests that don't conform with the naming
    # convention don't immediately cause the tests to fail.
    leeway = 1
    good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")
    bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
    if len(bad_script_names) < expected_violation_count:
        # Fewer violations than expected: prompt tightening the threshold.
        print("{}HURRAY!{} Number of functional tests violating naming convention reduced!".format(BOLD[1], BOLD[0]))
        print("Consider reducing expected_violation_count from %d to %d" % (expected_violation_count, len(bad_script_names)))
    elif len(bad_script_names) > expected_violation_count:
        print("WARNING: %d tests not meeting naming conventions. Please rename with allowed prefix. (expected %d):" % (len(bad_script_names), expected_violation_count))
        print("  %s" % ("\n  ".join(sorted(bad_script_names))))
    # Hard failure only once the leeway is exhausted.
    assert len(bad_script_names) <= expected_violation_count + leeway, "Too many tests not following naming convention! (%d found, expected: <= %d)" % (len(bad_script_names), expected_violation_count)
def check_script_list(src_dir):
    """Check scripts directory.
    Warn about any .py file under test/functional that does not appear in one
    of the known script lists (ALL_SCRIPTS, NON_SCRIPTS, SKIPPED_TESTS), i.e.
    a script that would never be run by the test runner."""
    script_dir = src_dir + '/test/functional/'
    on_disk = {entry for entry in os.listdir(script_dir) if entry[-3:] == ".py"}
    known = {item.split()[0] for item in ALL_SCRIPTS + NON_SCRIPTS + SKIPPED_TESTS}
    missed_tests = list(on_disk - known)
    if missed_tests:
        print("%sWARNING!%s The following scripts are not being run:\n%s \nCheck the test lists in test_runner.py." % (BOLD[1], BOLD[0], "\n".join(missed_tests)))
def get_cpu_count():
    """Return the machine's CPU count, or 4 if multiprocessing is unavailable."""
    try:
        from multiprocessing import cpu_count
    except ImportError:
        return 4
    return cpu_count()
class RPCCoverage:
    """
    Coverage reporting utilities for test_runner.

    Each test subprocess drops files into a shared temporary directory: one
    file per test listing the RPC commands it invoked, plus a reference
    listing of every RPC command per `raven-cli help` (`rpc_interface.txt`).
    After all tests complete, the union of invoked commands is diffed against
    the reference to find RPC commands no test exercised.

    See also: test/functional/test_framework/coverage.py
    """
    def __init__(self):
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir=%s' % self.dir
    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.

        Returns True when everything was covered, False otherwise.
        """
        uncovered = self._get_uncovered_rpc_commands()
        if not uncovered:
            print("All RPC commands covered.")
            return True
        print("Uncovered RPC commands:")
        print("".join(("  - %s\n" % command) for command in sorted(uncovered)))
        return False
    def cleanup(self):
        """Remove the coverage directory and everything in it."""
        return shutil.rmtree(self.dir)
    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `test/functional/test-framework/coverage.py`
        reference_filename = 'rpc_interface.txt'
        coverage_file_prefix = 'coverage.'
        reference_path = os.path.join(self.dir, reference_filename)
        if not os.path.isfile(reference_path):
            raise RuntimeError("No coverage reference found")
        with open(reference_path, 'r', encoding="utf8") as reference_file:
            all_cmds = {line.strip() for line in reference_file}
        covered_cmds = set()
        for root, _, files in os.walk(self.dir):
            for filename in files:
                if filename.startswith(coverage_file_prefix):
                    with open(os.path.join(root, filename), 'r', encoding="utf8") as coverage_file:
                        covered_cmds.update(line.strip() for line in coverage_file)
        return all_cmds - covered_cmds
# Script entry point: only runs when executed directly, not when imported.
if __name__ == '__main__':
    main()
| 41.3297 | 204 | 0.637955 |
ace646ba8b9802d246481000c47f75b5656be7f9 | 404 | py | Python | Code/socket_client.py | cocoaswifty/ESP8266 | eb6d4111586ff2841077909ca4a07a997dc30176 | [
"MIT"
] | null | null | null | Code/socket_client.py | cocoaswifty/ESP8266 | eb6d4111586ff2841077909ca4a07a997dc30176 | [
"MIT"
] | null | null | null | Code/socket_client.py | cocoaswifty/ESP8266 | eb6d4111586ff2841077909ca4a07a997dc30176 | [
"MIT"
] | null | null | null | import socket
# Simple interactive TCP client: forwards console input to the server and
# echoes the raw reply, until the server answers b'quit'.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # IPv4, TCP protocol
s.connect(('localhost', 5438))  # IP, PORT
while True:
    msg = input('請輸入訊息:')  # read a message from the terminal (prompt: "please enter a message")
    s.send(msg.encode('utf-8'))  # encode as UTF-8 before sending
    reply = s.recv(128)  # receive the server's reply, at most 128 bytes
    if reply == b'quit':  # server answered 'quit': close and exit
        print('關閉連線')  # prints "closing connection"
        s.close()
        break
    print(str(reply))  # show the reply content
| 23.764706 | 71 | 0.606436 |
ace6481e316e4afe4bbf98c4f14fe1ab8e94964f | 4,013 | py | Python | models/coarse_net.py | Nikronic/Deep-Halftoning | 9564c592abf139ccab2791c1dbb354505edab5f9 | [
"MIT"
] | null | null | null | models/coarse_net.py | Nikronic/Deep-Halftoning | 9564c592abf139ccab2791c1dbb354505edab5f9 | [
"MIT"
] | 1 | 2021-11-07T12:13:38.000Z | 2021-11-07T12:13:38.000Z | models/coarse_net.py | Nikronic/Deep-Halftoning | 9564c592abf139ccab2791c1dbb354505edab5f9 | [
"MIT"
] | null | null | null | # %% Import libraries
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.layers import CL, CBL, CE, C
# %% Submodules
class Contract(nn.Module):
    """Down-sampling step of the encoder path.

    Wraps a single stride-2, 4x4 convolution block whose flavour is chosen
    by ``module``: 'cl' -> Conv+ReLU (CL), 'ce' -> Conv+ELU (CE), anything
    else -> Conv+BatchNorm+ReLU (CBL). The stride-2 convolution halves the
    spatial resolution.
    """

    def __init__(self, input_channel, output_channel, module='cbl'):
        """
        :param input_channel: input channel size
        :param output_channel: output channel size
        :param module: block flavour selector ('cl', 'ce', or default 'cbl')
        """
        super(Contract, self).__init__()
        if module == 'cl':
            block_cls = CL
        elif module == 'ce':
            block_cls = CE
        else:
            block_cls = CBL
        conv_block = block_cls(input_channel, output_channel, kernel_size=4, stride=2, padding=1)
        self.layers = nn.Sequential(conv_block)

    def forward(self, x):
        """Apply the down-sampling block to ``x``."""
        return self.layers(x)
# %%
class Expand(nn.Module):
    def __init__(self, input_channel, output_channel, ks=4, s=2):
        """
        This path consists of an up sampling of the feature map followed by a
        4x4 convolution ("up-convolution" or Transformed Convolution) that halves the number of
        feature channels, a concatenation with the correspondingly cropped feature map from Contract phase
        :param input_channel: input channel size
        :param output_channel: output channel size
        :param ks: convolution kernel size (default 4)
        :param s: convolution stride (default 2)
        """
        super(Expand, self).__init__()
        # Input is the channel-wise concatenation of the skip tensor and the
        # decoder tensor, hence `input_channel * 2` channels going in.
        self.layers = CE(input_channel * 2, output_channel, kernel_size=ks, stride=s, padding=1)

    def forward(self, x1, x2):
        # Spatial size differences between the skip tensor x1 and the decoder
        # tensor x2 along dims 2 and 3.
        delta_x = x1.size()[2] - x2.size()[2]
        delta_y = x1.size()[3] - x2.size()[3]
        # NOTE(review): F.pad pads the LAST dimension first, so this tuple pads
        # dim 3 by (delta_x // 2, delta_y // 2) and dim 2 by the same pair.
        # The shapes only line up when delta_x == delta_y (and both even) —
        # confirm against the intended UNet crop/pad behaviour.
        x2 = F.pad(x2, pad=(delta_x // 2, delta_y // 2, delta_x // 2, delta_y // 2), mode='constant', value=0)
        # Concatenate along the channel dimension, then convolve.
        x = torch.cat((x2, x1), dim=1)
        x = self.layers(x)
        return x
# %% Main CLass
class CoarseNet(nn.Module):
    def __init__(self, input_channels=3, output_channels=3):
        """
        Implementation of CoarseNet, a modified version of UNet.
        (https://arxiv.org/abs/1505.04597 - Convolutional Networks for Biomedical Image Segmentation (Ronneberger et al., 2015))
        :param input_channels: number of input channels of input images to network.
        :param output_channels: number of output channels of output images of network.
        """
        super(CoarseNet, self).__init__()
        self.input_channels = input_channels
        self.output_channels = output_channels

        # Encoder: five stride-2 down-sampling blocks (channel counts in comments below).
        self.cl0 = Contract(input_channels, 64, module='cl')
        self.cbl0 = Contract(64, 128)
        self.cbl1 = Contract(128, 256)
        self.cbl2 = Contract(256, 512)
        self.cl1 = Contract(512, 512, module='cl')

        # Decoder: Expand blocks concatenate the matching encoder feature map
        # (skip connection) with the decoder feature map before convolving.
        self.ce0 = Contract(512, 512, module='ce')
        self.ce1 = Expand(512, 256)
        self.ce2 = Expand(256, 128)
        self.ce3 = Expand(128, 64)
        self.ce4 = Expand(64, 64)
        self.ce5 = CE(64, 64, kernel_size=3, stride=1, padding=1)

        # final: 3x3 convolution down to the requested output channels, no activation.
        self.final = C(64, self.output_channels, kernel_size=3, stride=1, padding=1, activation=None)

    def forward(self, x):
        # Encoder path; each step halves spatial resolution.
        out = self.cl0(x)  # 3>64
        out2 = self.cbl0(out)  # 64>128
        out3 = self.cbl1(out2)  # 128>256
        out4 = self.cbl2(out3)  # 256>512
        out5 = self.cl1(out4)  # 512>512

        # Decoder path with skip connections from the encoder.
        in0 = self.ce0(out5)
        in1 = self.ce1(out4, in0)  # 512>512
        in2 = self.ce2(out3, in1)  # 512>256
        in3 = self.ce3(out2, in2)  # 256>128
        in4 = self.ce4(out, in3)  # 128>64
        f = self.ce5(in4)
        f = self.final(f)
        return f
# %% tests
# z = torch.randn(1, 3, 256, 256)
# model = CoarseNet()
# o = model(z)
| 34.594828 | 128 | 0.61525 |
ace649d08b087b37ee5eb5aa7b20ce01ea8089ee | 4,280 | py | Python | apache-fake-log-gen.py | mihkels/fake-log-generator | 3b6c7489b016dcb548ec841c9d0bcc39e717e065 | [
"Apache-2.0"
] | 4 | 2020-05-14T05:57:21.000Z | 2021-06-08T14:43:57.000Z | apache-fake-log-gen.py | mihkels/fake-log-generator | 3b6c7489b016dcb548ec841c9d0bcc39e717e065 | [
"Apache-2.0"
] | 1 | 2020-11-11T07:20:15.000Z | 2020-11-11T07:20:15.000Z | apache-fake-log-gen.py | mihkels/fake-log-generator | 3b6c7489b016dcb548ec841c9d0bcc39e717e065 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import time
import datetime
import pytz
import numpy
import random
import gzip
import zipfile
import sys
import argparse
from faker import Faker
from random import randrange
from tzlocal import get_localzone
import log_write_sleep
local = get_localzone()
# todo:
# allow writing different patterns (Common Log, Apache Error log etc)
# log rotation
class switch(object):
    """Emulate a C-style switch statement.

    Usage::

        for case in switch(value):
            if case('a'):
                ...
                break
            if case():  # default
                ...

    Iterating yields the ``match`` method exactly once. Calling ``match``
    with no arguments is the default case; once a case has matched,
    ``self.fall`` enables fall-through for subsequent cases.
    """
    def __init__(self, value):
        self.value = value  # the value being switched on
        self.fall = False  # becomes True after a match, enabling fall-through

    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # BUG FIX: this used `raise StopIteration`, which under PEP 479
        # (Python 3.7+) is converted to RuntimeError and crashed any
        # `for case in switch(...)` loop that fell through to the default
        # case without `break`. Ending the generator with `return` is the
        # correct way to stop iteration.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args:  # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
# Command-line interface: output destination, line count, timing and naming
# options for the fake Apache access-log generator.
parser = argparse.ArgumentParser(__file__, description="Fake Apache Log Generator")
parser.add_argument("--output", "-o", dest='output_type', help="Write to a Log file, a gzip file or to STDOUT",
                    choices=['LOG', 'GZ', 'CONSOLE'])
parser.add_argument("--num", "-n", dest='num_lines', help="Number of lines to generate (0 for infinite)", type=int,
                    default=1)
parser.add_argument("--prefix", "-p", dest='file_prefix', help="Prefix the output file name", type=str)
parser.add_argument("--sleep", "-s", help="Sleep this long between lines (in seconds)", default=0.0, type=float)
parser.add_argument('--output-dir', '-d', help='Output directory for log files', default="", type=str)
parser.add_argument('--filename', '-f', help='Log file name', default="", type=str)
parser.add_argument('--min-delay', help='Minimum delay between writes in milliseconds', default=0, type=int)
parser.add_argument('--max-delay', help='Maximum delay between writes in milliseconds', default=0, type=int)
args = parser.parse_args()

# Unpack the parsed arguments into module-level names used by the main loop.
log_lines = args.num_lines
file_prefix = args.file_prefix
output_type = args.output_type
min_write_delay = args.min_delay
max_write_delay = args.max_delay
output_filename = args.filename
output_dir = log_write_sleep.write_log_directory(args.output_dir)

faker = Faker()
timestr = time.strftime("%Y%m%d-%H%M%S")
otime = datetime.datetime.now()

# Derive the output file name: explicit --filename wins; otherwise a
# timestamped access_log name, optionally prefixed.
if output_filename == '':
    outFileName = 'access_log_' + timestr + '.log' if not file_prefix else file_prefix + '_access_log_' + timestr + '.log'
else:
    outFileName = output_filename + '.log'

outFileName = output_dir + outFileName
# NOTE(review): print() is given two arguments here, so the literal '%s' is
# printed followed by the name; presumably 'Output file: %s' % outFileName
# was intended — confirm.
print('Output file: %s', outFileName)
# Pick the output stream based on --output; the default (and CONSOLE) is stdout.
# NOTE(review): gzip.open(..., 'w') opens in binary mode, but the loop below
# writes str — the 'GZ' path presumably needs mode 'wt' or encoded bytes; confirm.
for case in switch(output_type):
    if case('LOG'):
        f = open(outFileName, 'a')
        break
    if case('GZ'):
        f = gzip.open(outFileName + '.gz', 'w')
        break
    if case('CONSOLE'): pass
    if case():
        f = sys.stdout

# Weighted pools for the synthetic log fields.
response = ["200", "404", "500", "301"]
verb = ["GET", "POST", "DELETE", "PUT"]
resources = ["/list", "/wp-content", "/wp-admin", "/explore", "/search/tag/list", "/app/main/posts",
             "/posts/posts/explore", "/apps/cart.jsp?appID="]
ualist = [faker.firefox, faker.chrome, faker.safari, faker.internet_explorer, faker.opera]

flag = True
while (flag):
    # Optional random pause between writes, bounded by --min-delay/--max-delay (ms).
    write_sleep = log_write_sleep.write_sleep(min_write_delay, max_write_delay)
    if write_sleep != 0:
        time.sleep(write_sleep / 1000)
    # Advance the synthetic timestamp: fixed step with --sleep, random otherwise.
    if args.sleep:
        increment = datetime.timedelta(seconds=args.sleep)
    else:
        increment = datetime.timedelta(seconds=random.randint(30, 300))
    otime += increment

    # Randomize the fields of one Apache combined-log line.
    ip = faker.ipv4()
    dt = otime.strftime('%d/%b/%Y:%H:%M:%S')
    tz = datetime.datetime.now(local).strftime('%z')
    vrb = numpy.random.choice(verb, p=[0.6, 0.1, 0.1, 0.2])

    uri = random.choice(resources)
    if uri.find("apps") > 0:
        # Cart resource gets a random numeric appID appended.
        uri += repr(random.randint(1000, 10000))

    resp = numpy.random.choice(response, p=[0.9, 0.04, 0.02, 0.04])
    byt = int(random.gauss(5000, 50))
    referer = faker.uri()
    useragent = numpy.random.choice(ualist, p=[0.5, 0.3, 0.1, 0.05, 0.05])()
    f.write('%s - - [%s %s] "%s %s HTTP/1.0" %s %s "%s" "%s"\n' % (ip, dt, tz, vrb, uri, resp, byt, referer, useragent))

    # Count down; --num 0 loops forever because log_lines never reaches 0 from -1.
    log_lines = log_lines - 1
    flag = False if log_lines == 0 else True
    if args.sleep:
        time.sleep(args.sleep)
ace64b52e2481d5aef45462302fa12bef59d5568 | 9,540 | py | Python | src/aws_scanner_config.py | jezd-axyl/platsec-aws-scanner | bc2b064c87ac2f77fab49c1e1eb3782d6de685b2 | [
"Apache-2.0"
] | null | null | null | src/aws_scanner_config.py | jezd-axyl/platsec-aws-scanner | bc2b064c87ac2f77fab49c1e1eb3782d6de685b2 | [
"Apache-2.0"
] | 4 | 2021-05-06T12:36:46.000Z | 2022-02-11T09:47:57.000Z | src/aws_scanner_config.py | jezd-axyl/platsec-aws-scanner | bc2b064c87ac2f77fab49c1e1eb3782d6de685b2 | [
"Apache-2.0"
] | 2 | 2021-04-21T04:48:47.000Z | 2022-01-14T04:29:17.000Z | import os
import sys
from configparser import ConfigParser
from json import JSONDecodeError, loads
from logging import getLogger
from typing import Any, Dict, List
from src.data.aws_iam_types import PasswordPolicy
from src.data.aws_organizations_types import Account
class AwsScannerConfig:
    """Typed accessor facade over the scanner configuration.

    Values come from `aws_scanner_config.ini` (or the file named by
    AWS_SCANNER_CONFIG_FILE_NAME), and every key can be overridden with an
    environment variable of the form AWS_SCANNER_<SECTION>_<KEY>. Missing or
    malformed config causes the process to exit with a descriptive message.
    """
    def __init__(self) -> None:
        self._logger = getLogger(self.__class__.__name__)
        self._config = self._load_config()

    def athena_account(self) -> Account:
        return Account(self._get_config("athena", "account"), "athena")

    def athena_role(self) -> str:
        return self._get_config("athena", "role")

    def athena_database_prefix(self) -> str:
        return self._get_config("athena", "database_prefix")

    def athena_query_results_bucket(self) -> str:
        return self._get_config("athena", "query_results_bucket")

    def athena_query_timeout_seconds(self) -> int:
        return self._get_int_config("athena", "query_timeout_seconds")

    def athena_query_results_polling_delay_seconds(self) -> int:
        return self._get_int_config("athena", "query_results_polling_delay_seconds")

    def athena_query_throttling_seconds(self) -> int:
        return self._get_int_config("athena", "query_throttling_seconds")

    def cloudtrail_account(self) -> Account:
        return Account(self._get_config("cloudtrail", "account"), "cloudtrail")

    def cloudtrail_event_key_id(self) -> str:
        return self._get_config("cloudtrail", "event_key_id")

    def cloudtrail_log_group_name(self) -> str:
        return self._get_config("cloudtrail", "log_group_name")

    def cloudtrail_logs_bucket(self) -> str:
        return self._get_config("cloudtrail", "logs_bucket")

    def cloudtrail_logs_retention_days(self) -> int:
        return self._get_int_config("cloudtrail", "logs_retention_days")

    def cloudtrail_region(self) -> str:
        return self._get_config("cloudtrail", "region")

    def cloudtrail_role(self) -> str:
        return self._get_config("cloudtrail", "role")

    def cost_explorer_role(self) -> str:
        return self._get_config("cost_explorer", "role")

    def ec2_role(self) -> str:
        return self._get_config("ec2", "role")

    def ec2_flow_log_status(self) -> str:
        return self._get_config("ec2", "flow_log_status")

    def ec2_flow_log_traffic_type(self) -> str:
        return self._get_config("ec2", "flow_log_traffic_type")

    def ec2_flow_log_format(self) -> str:
        return self._get_config("ec2", "flow_log_format")

    def iam_role(self) -> str:
        return self._get_config("iam", "role")

    def iam_audit_role(self) -> str:
        return self._get_config("iam", "audit_role")

    def iam_password_policy(self) -> PasswordPolicy:
        # Aggregates the individual password-policy keys into one value object;
        # expire_passwords is derived from a positive max_password_age.
        return PasswordPolicy(
            minimum_password_length=self.iam_password_policy_minimum_password_length(),
            require_symbols=self.iam_password_policy_require_symbols(),
            require_numbers=self.iam_password_policy_require_numbers(),
            require_uppercase_chars=self.iam_password_policy_require_uppercase_chars(),
            require_lowercase_chars=self.iam_password_policy_require_lowercase_chars(),
            allow_users_to_change_password=self.iam_password_policy_allow_users_to_change_password(),
            expire_passwords=self.iam_password_policy_max_password_age() > 0,
            max_password_age=self.iam_password_policy_max_password_age(),
            password_reuse_prevention=self.iam_password_policy_password_reuse_prevention(),
            hard_expiry=self.iam_password_policy_hard_expiry(),
        )

    def iam_password_policy_minimum_password_length(self) -> int:
        return self._get_int_config("iam", "password_policy_minimum_password_length")

    def iam_password_policy_require_symbols(self) -> bool:
        return self._get_bool_config("iam", "password_policy_require_symbols")

    def iam_password_policy_require_numbers(self) -> bool:
        return self._get_bool_config("iam", "password_policy_require_numbers")

    def iam_password_policy_require_uppercase_chars(self) -> bool:
        return self._get_bool_config("iam", "password_policy_require_uppercase_chars")

    def iam_password_policy_require_lowercase_chars(self) -> bool:
        return self._get_bool_config("iam", "password_policy_require_lowercase_chars")

    def iam_password_policy_allow_users_to_change_password(self) -> bool:
        return self._get_bool_config("iam", "password_policy_allow_users_to_change_password")

    def iam_password_policy_max_password_age(self) -> int:
        return self._get_int_config("iam", "password_policy_max_password_age")

    def iam_password_policy_password_reuse_prevention(self) -> int:
        return self._get_int_config("iam", "password_policy_password_reuse_prevention")

    def iam_password_policy_hard_expiry(self) -> bool:
        return self._get_bool_config("iam", "password_policy_hard_expiry")

    def kms_role(self) -> str:
        return self._get_config("kms", "role")

    def logs_vpc_log_group_name(self) -> str:
        return self._get_config("logs", "vpc_log_group_name")

    def logs_vpc_log_group_subscription_filter_name(self) -> str:
        # Derived, not configured: filter name follows the log group name.
        return f"{self.logs_vpc_log_group_name()}_sub_filter"

    def logs_vpc_log_group_pattern(self) -> str:
        return self._get_config("logs", "vpc_log_group_pattern")

    def logs_vpc_log_group_destination(self) -> str:
        return self._get_config("logs", "vpc_log_group_destination")

    def logs_vpc_log_group_delivery_role(self) -> str:
        return self._get_config("logs", "vpc_log_group_delivery_role")

    def logs_vpc_log_group_delivery_role_policy(self) -> str:
        return self._get_config("logs", "vpc_log_group_delivery_role_policy")

    def logs_vpc_log_group_delivery_role_assume_policy(self) -> Dict[str, Any]:
        return self._get_json_config("logs", "vpc_log_group_delivery_role_assume_policy")

    def logs_vpc_log_group_delivery_role_policy_document(self) -> Dict[str, Any]:
        return self._get_json_config("logs", "vpc_log_group_delivery_role_policy_document")

    def logs_vpc_log_group_retention_policy_days(self) -> int:
        return self._get_int_config("logs", "vpc_log_group_retention_policy_days")

    def logs_role(self) -> str:
        return self._get_config("logs", "role")

    def organization_account(self) -> Account:
        return Account(self._get_config("organization", "account"), "organization")

    def organization_role(self) -> str:
        return self._get_config("organization", "role")

    def organization_include_root_accounts(self) -> bool:
        return self._get_bool_config("organization", "include_root_accounts")

    def organization_parent(self) -> str:
        return self._get_config("organization", "parent")

    def reports_output(self) -> str:
        # Only "stdout" and "s3" are valid; anything else aborts the process.
        output = self._get_config("reports", "output")
        supported = ["stdout", "s3"]
        return output if output.lower() in supported else sys.exit(self._unsupported("reports", "output", supported))

    def reports_account(self) -> Account:
        return Account(self._get_config("reports", "account"), "reports")

    def reports_role(self) -> str:
        return self._get_config("reports", "role")

    def reports_bucket(self) -> str:
        return self._get_config("reports", "bucket")

    def s3_role(self) -> str:
        return self._get_config("s3", "role")

    def session_duration_seconds(self) -> int:
        return self._get_int_config("session", "duration_seconds")

    def ssm_role(self) -> str:
        return self._get_config("ssm", "role")

    def tasks_executors(self) -> int:
        return self._get_int_config("tasks", "executors")

    def user_account(self) -> Account:
        return Account(self._get_config("user", "account"), "user")

    def user_name(self) -> str:
        return self._get_config("user", "name")

    def _get_config(self, section: str, key: str) -> str:
        """Fetch a raw string value; env var AWS_SCANNER_<SECTION>_<KEY> wins over the ini file."""
        try:
            return os.environ.get(f"AWS_SCANNER_{section.upper()}_{key.upper()}") or self._config[section][key]
        except KeyError:
            sys.exit(f"missing config: section '{section}', key '{key}'")

    def _get_int_config(self, section: str, key: str) -> int:
        """Fetch a value and coerce it to int, exiting on a non-numeric value."""
        try:
            return int(self._get_config(section, key))
        except ValueError as err:
            sys.exit(f"invalid config type: section '{section}', key '{key}', error: {err}")

    def _get_bool_config(self, section: str, key: str) -> bool:
        # Only the exact lowercase string "true" is treated as True.
        return str(self._get_config(section, key)) == "true"

    @staticmethod
    def _to_json(json_str: str, section: str, key: str) -> Dict[str, Any]:
        """Parse a JSON object string, exiting with context on a decode error."""
        try:
            return dict(loads(json_str))
        except JSONDecodeError as err:
            sys.exit(f"invalid config: section '{section}', key '{key}', error: {err}")

    def _get_json_config(self, section: str, key: str) -> Dict[str, Any]:
        return self._to_json(self._get_config(section, key), section, key)

    def _load_config(self) -> ConfigParser:
        """Read the ini file; an absent file is fine (env vars alone may supply everything)."""
        config = ConfigParser()
        file_name = os.environ.get("AWS_SCANNER_CONFIG_FILE_NAME", "aws_scanner_config.ini")
        if not config.read(file_name):
            self._logger.debug("Config file 'aws_scanner_config.ini' not found, using environment variables instead")
        return config

    @staticmethod
    def _unsupported(section: str, key: str, supported: List[str]) -> str:
        return f"unsupported config: section '{section}', key '{key}' (should be one of {supported})"
ace64b7bf3817462d106fde2688e3b4785b71fcb | 5,190 | py | Python | docs/conf.py | MZ-Makos/geometry_analysis | 2c83543ba3ce48d9aa24be9e10624ba37aebc860 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | MZ-Makos/geometry_analysis | 2c83543ba3ce48d9aa24be9e10624ba37aebc860 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | MZ-Makos/geometry_analysis | 2c83543ba3ce48d9aa24be9e10624ba37aebc860 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'geometry_analysis'
copyright = ("2019, Malgorzata Z Makos. Project structure based on the "
"Computational Molecular Science Python Cookiecutter version 1.0")
author = 'Malgorzata Z Makos'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'geometry_analysisdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'geometry_analysis.tex', 'geometry_analysis Documentation',
'geometry_analysis', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'geometry_analysis', 'geometry_analysis Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'geometry_analysis', 'geometry_analysis Documentation',
author, 'geometry_analysis', 'A python package for the MolSSI Software Summer School.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
| 31.26506 | 92 | 0.657611 |
ace64c0ce1fba1c7826c1d2c3e88de244171648b | 955 | py | Python | tests/test_codegen_sdaccel.py | hecmay/heterocl | e91eb4841f09385f676615cd246642744d08dd7c | [
"Apache-2.0"
] | 5 | 2019-11-09T14:30:47.000Z | 2020-10-30T04:47:47.000Z | tests/test_codegen_sdaccel.py | hecmay/heterocl | e91eb4841f09385f676615cd246642744d08dd7c | [
"Apache-2.0"
] | null | null | null | tests/test_codegen_sdaccel.py | hecmay/heterocl | e91eb4841f09385f676615cd246642744d08dd7c | [
"Apache-2.0"
] | 2 | 2019-10-01T18:01:09.000Z | 2020-09-15T02:36:59.000Z | import heterocl as hcl
def test_pragma():
    """Smoke-test SDAccel codegen: unroll, pipeline and partition schedule
    directives must surface as the corresponding OpenCL attributes."""
    hcl.init(hcl.Float())
    A = hcl.placeholder((10, 32), "A")
    B = hcl.placeholder((10, 32))
    C = hcl.compute(A.shape, lambda i, j: A[i][j] + B[i][j])

    def emit(schedule):
        # Lower the schedule to SDAccel OpenCL and echo the kernel for debugging.
        code = hcl.build(schedule, target="sdaccel")
        print(code)
        return code

    # unroll: factor=6 should appear as an opencl_unroll_hint attribute
    sched = hcl.create_schedule([A, B, C])
    sched[C].unroll(C.axis[1], factor=6)
    assert "__attribute__((opencl_unroll_hint(6)))" in emit(sched)

    # pipeline: initiation interval 2 should appear as xcl_pipeline_loop(2)
    sched = hcl.create_schedule([A, B, C])
    sched[C].pipeline(C.axis[0], initiation_interval=2)
    assert "__attribute__((xcl_pipeline_loop(2)))" in emit(sched)

    # partition: block partitioning of A along dim 2 with factor 2
    sched = hcl.create_schedule([A, B, C])
    sched.partition(A, hcl.Partition.Block, dim=2, factor=2)
    assert "__attribute__((xcl_array_partition(block,2,2)))" in emit(sched)


if __name__ == "__main__":
    test_pragma()
| 29.84375 | 70 | 0.620942 |
ace64c517eb2a99f051ce10582b62af568d2d9fe | 25,254 | py | Python | zhaquirks/xbee/__init__.py | Brazen00/zha-device-handlers | a4c6cdb9e531b9f643567c32e8cd8bacc64840cd | [
"Apache-2.0"
] | 2 | 2022-01-09T05:11:07.000Z | 2022-01-24T03:19:27.000Z | zhaquirks/xbee/__init__.py | Brazen00/zha-device-handlers | a4c6cdb9e531b9f643567c32e8cd8bacc64840cd | [
"Apache-2.0"
] | 1 | 2021-12-06T10:01:38.000Z | 2021-12-06T10:01:38.000Z | zhaquirks/xbee/__init__.py | Brazen00/zha-device-handlers | a4c6cdb9e531b9f643567c32e8cd8bacc64840cd | [
"Apache-2.0"
] | 1 | 2022-03-15T21:05:38.000Z | 2022-03-15T21:05:38.000Z | """Module for xbee devices as remote sensors/switches.
Allows for direct control of an xbee3's digital pins.
Reading pins should work with any coordinator (Untested)
writing pins will only work with an xbee as the coordinator as
it requires zigpy_xbee.
The xbee must be configured via XCTU to send samples to the coordinator,
DH and DL to the coordiator's address (0). and each pin must be configured
to act as a digital input.
Either configure reporting on state change by setting the appropriate bit
mask on IC or set IR to a value greater than zero to send perodic reports
every x milliseconds, I recommend the later, since this will ensure
the xbee stays alive in Home Assistant.
"""
import asyncio
import enum
import logging
from typing import Any, List, Optional, Union
from zigpy.quirks import CustomDevice
import zigpy.types as t
from zigpy.zcl import foundation
from zigpy.zcl.clusters.general import (
AnalogInput,
AnalogOutput,
Basic,
BinaryInput,
LevelControl,
OnOff,
)
from zhaquirks import EventableCluster, LocalDataCluster
from zhaquirks.const import ENDPOINTS, INPUT_CLUSTERS, OUTPUT_CLUSTERS
_LOGGER = logging.getLogger(__name__)
DATA_IN_CMD = 0x0000
DIO_APPLY_CHANGES = 0x02
DIO_PIN_HIGH = 0x05
DIO_PIN_LOW = 0x04
ON_OFF_CMD = 0x0000
XBEE_DATA_CLUSTER = 0x11
XBEE_AT_REQUEST_CLUSTER = 0x21
XBEE_AT_RESPONSE_CLUSTER = 0xA1
XBEE_AT_ENDPOINT = 0xE6
XBEE_DATA_ENDPOINT = 0xE8
XBEE_IO_CLUSTER = 0x92
XBEE_PROFILE_ID = 0xC105
ATTR_ON_OFF = 0x0000
ATTR_PRESENT_VALUE = 0x0055
PIN_ANALOG_OUTPUT = 2
REMOTE_AT_COMMAND_TIMEOUT = 30
class int_t(int):
    """Fixed-width big-endian signed integer.

    Concrete subclasses set ``_size`` (byte width); unsigned variants flip
    ``_signed``.
    """

    _signed = True

    def serialize(self):
        """Pack the value into ``_size`` big-endian bytes."""
        return self.to_bytes(self._size, "big", signed=self._signed)

    @classmethod
    def deserialize(cls, data):
        """Unpack one value from the front of *data*; return (value, rest)."""
        # Use int.from_bytes rather than cls.from_bytes to work around
        # https://bugs.python.org/issue23640
        raw, rest = data[: cls._size], data[cls._size :]
        return cls(int.from_bytes(raw, "big", signed=cls._signed)), rest
class uint_t(int_t):
    """Unsigned int type."""

    # Flips the signed flag used by int_t's to_bytes/from_bytes calls.
    _signed = False
class uint8_t(uint_t):
    """Unsigned int 8 bit type."""

    _size = 1  # one byte on the wire
class int16_t(int_t):
    """Signed int 16 bit type."""

    _size = 2  # two bytes on the wire, big-endian, two's complement
class uint16_t(uint_t):
    """Unsigned int 16 bit type."""

    _size = 2  # two bytes on the wire
class uint32_t(uint_t):
    """Unsigned int 32 bit type."""

    _size = 4  # four bytes on the wire
class uint64_t(uint_t):
    """Unsigned int 64 bit type."""

    _size = 8  # eight bytes on the wire
class Bytes(bytes):
    """Raw byte payload that passes through (de)serialization unchanged."""

    @classmethod
    def deserialize(cls, data):
        """Consume *data* entirely; return (payload, empty remainder)."""
        payload = cls(data)
        return payload, b""

    def serialize(self):
        """Already wire format — return the bytes as-is."""
        return self
# https://github.com/zigpy/zigpy-xbee/blob/dev/zigpy_xbee/api.py
AT_COMMANDS = {
# Addressing commands
"DH": uint32_t,
"DL": uint32_t,
"MY": uint16_t,
"MP": uint16_t,
"NC": uint32_t, # 0 - MAX_CHILDREN.
"SH": uint32_t,
"SL": uint32_t,
"NI": Bytes, # 20 byte printable ascii string
# "SE": uint8_t,
# "DE": uint8_t,
# "CI": uint16_t,
"TO": uint8_t,
"NP": uint16_t,
"DD": uint32_t,
"CR": uint8_t, # 0 - 0x3F
# Networking commands
"CH": uint8_t, # 0x0B - 0x1A
"DA": None, # no param
# "ID": uint64_t,
"OP": uint64_t,
"NH": uint8_t,
"BH": uint8_t, # 0 - 0x1E
"OI": uint16_t,
"NT": uint8_t, # 0x20 - 0xFF
"NO": uint8_t, # bitfield, 0 - 3
"SC": uint16_t, # 1 - 0xFFFF
"SD": uint8_t, # 0 - 7
# "ZS": uint8_t, # 0 - 2
"NJ": uint8_t,
"JV": t.Bool,
"NW": uint16_t, # 0 - 0x64FF
"JN": t.Bool,
"AR": uint8_t,
"DJ": t.Bool, # WTF, docs
"II": uint16_t,
# Security commands
# "EE": t.Bool,
# "EO": uint8_t,
# "NK": Bytes, # 128-bit value
# "KY": Bytes, # 128-bit value
# RF interfacing commands
"PL": uint8_t, # 0 - 4 (basically an Enum)
"PM": t.Bool,
"DB": uint8_t,
"PP": uint8_t, # RO
"AP": uint8_t, # 1-2 (an Enum)
"AO": uint8_t, # 0 - 3 (an Enum)
"BD": uint8_t, # 0 - 7 (an Enum)
"NB": uint8_t, # 0 - 3 (an Enum)
"SB": uint8_t, # 0 - 1 (an Enum)
"RO": uint8_t,
"D6": uint8_t, # 0 - 5 (an Enum)
"D7": uint8_t, # 0 - 7 (an Enum)
"P3": uint8_t, # 0 - 5 (an Enum)
"P4": uint8_t, # 0 - 5 (an Enum)
# I/O commands
"IR": uint16_t,
"IC": uint16_t,
"D0": uint8_t, # 0 - 5 (an Enum)
"D1": uint8_t, # 0 - 5 (an Enum)
"D2": uint8_t, # 0 - 5 (an Enum)
"D3": uint8_t, # 0 - 5 (an Enum)
"D4": uint8_t, # 0 - 5 (an Enum)
"D5": uint8_t, # 0 - 5 (an Enum)
"D8": uint8_t, # 0 - 5 (an Enum)
"D9": uint8_t, # 0 - 5 (an Enum)
"P0": uint8_t, # 0 - 5 (an Enum)
"P1": uint8_t, # 0 - 5 (an Enum)
"P2": uint8_t, # 0 - 5 (an Enum)
"P5": uint8_t, # 0 - 5 (an Enum)
"P6": uint8_t, # 0 - 5 (an Enum)
"P7": uint8_t, # 0 - 5 (an Enum)
"P8": uint8_t, # 0 - 5 (an Enum)
"P9": uint8_t, # 0 - 5 (an Enum)
"LT": uint8_t,
"PR": uint16_t,
"RP": uint8_t,
"%V": uint16_t, # read only
"V+": uint16_t,
"TP": int16_t,
"M0": uint16_t, # 0 - 0x3FF
"M1": uint16_t, # 0 - 0x3FF
# Diagnostics commands
"VR": uint16_t,
"HV": uint16_t,
"AI": uint8_t,
# AT command options
"CT": uint16_t, # 2 - 0x028F
"CN": None,
"GT": uint16_t,
"CC": uint8_t,
# Sleep commands
"SM": uint8_t,
"SN": uint16_t,
"SP": uint16_t,
"ST": uint16_t,
"SO": uint8_t,
"WH": uint16_t,
"SI": None,
"PO": uint16_t, # 0 - 0x3E8
# Execution commands
"AC": None,
"WR": None,
"RE": None,
"FR": None,
"NR": t.Bool,
"SI": None,
"CB": uint8_t,
"DN": Bytes, # "up to 20-Byte printable ASCII string"
"IS": None,
"1S": None,
"AS": None,
# Stuff I've guessed
# "CE": uint8_t,
}
# 4 AO lines
# 10 digital
# Discovered endpoint information: <SimpleDescriptor endpoint=232 profile=49413
# device_type=1 device_version=0 input_clusters=[] output_clusters=[]>
ENDPOINT_TO_AT = {
0xD0: "D0",
0xD1: "D1",
0xD2: "D2",
0xD3: "D3",
0xD4: "D4",
0xD5: "D5",
0xD6: "D6",
0xD7: "D7",
0xD8: "D8",
0xD9: "D9",
0xDA: "P0",
0xDB: "P1",
0xDC: "P2",
0xDD: "P3",
0xDE: "P4",
}
class XBeeBasic(LocalDataCluster, Basic):
    """XBee Basic cluster served from the local attribute cache.

    The XBee has no real ZCL stack, so ZCLVersion and PowerSource are
    pre-seeded here and answered locally.
    """

    def __init__(self, endpoint, is_server=True):
        """Set default values and store them in cache."""
        super().__init__(endpoint, is_server)
        self._update_attribute(0x0000, 0x02)  # ZCLVersion attribute = 2
        self._update_attribute(0x0007, self.PowerSource.Unknown)  # PowerSource
class XBeeOnOff(LocalDataCluster, OnOff):
    """XBee on/off cluster: maps ZCL Off(0)/On(1) onto remote AT pin writes."""

    async def command(
        self, command_id, *args, manufacturer=None, expect_reply=True, tsn=None
    ):
        """Xbee change pin state command, requires zigpy_xbee."""
        # Endpoint id encodes the pin: 0xD0..0xDE -> AT command name (D0..P4).
        pin_name = ENDPOINT_TO_AT.get(self._endpoint.endpoint_id)
        if command_id not in [0, 1] or pin_name is None:
            # Not a plain on/off command, or not a pin endpoint: defer to zigpy.
            # NOTE(review): this returns the un-awaited coroutine and drops the
            # manufacturer/expect_reply/tsn kwargs — confirm against the zigpy
            # OnOff base class whether that is intentional.
            return super().command(command_id, *args)
        if command_id == 0:
            pin_cmd = DIO_PIN_LOW  # AT value 4: drive pin low
        else:
            pin_cmd = DIO_PIN_HIGH  # AT value 5: drive pin high
        await self._endpoint.device.remote_at(pin_name, pin_cmd)
        # Mirror the commanded state into the local cache so reads see it.
        self._update_attribute(ATTR_ON_OFF, command_id)
        return 0, foundation.Status.SUCCESS
class XBeeAnalogInput(LocalDataCluster, AnalogInput):
    """XBee Analog Input Cluster.

    Intentionally empty: present_value is pushed into the cache by the
    IO-sample handler in DigitalIOCluster elsewhere in this module.
    """

    pass
class XBeePWM(LocalDataCluster, AnalogOutput):
    """XBee PWM Cluster.

    Exposes the two XBee PWM outputs as a ZCL AnalogOutput; present_value
    reads/writes are translated to remote AT M0/M1 duty-cycle commands.
    """

    # endpoint id -> AT command holding that endpoint's PWM duty cycle
    _ep_id_2_pwm = {0xDA: "M0", 0xDB: "M1"}

    def __init__(self, endpoint, is_server=True):
        """Set known attributes and store them in cache."""
        super().__init__(endpoint, is_server)
        self._update_attribute(0x0041, float(0x03FF))  # max_present_value (10-bit duty cycle)
        self._update_attribute(0x0045, 0.0)  # min_present_value
        self._update_attribute(0x0051, 0)  # out_of_service
        self._update_attribute(0x006A, 1.0)  # resolution
        self._update_attribute(0x006F, 0x00)  # status_flags
    async def write_attributes(self, attributes, manufacturer=None):
        """Intercept present_value writes and forward them as AT commands."""
        # Accept the attribute either by numeric id or by name.
        attr_id = None
        if ATTR_PRESENT_VALUE in attributes:
            attr_id = ATTR_PRESENT_VALUE
        elif "present_value" in attributes:
            attr_id = "present_value"
        if attr_id:
            duty_cycle = int(round(float(attributes[attr_id])))
            # First set the duty cycle (M0/M1), then (re)configure the pin
            # as an analog/PWM output so the new value takes effect.
            at_command = self._ep_id_2_pwm.get(self._endpoint.endpoint_id)
            await self._endpoint.device.remote_at(at_command, duty_cycle)
            at_command = ENDPOINT_TO_AT.get(self._endpoint.endpoint_id)
            await self._endpoint.device.remote_at(at_command, PIN_ANALOG_OUTPUT)
        return await super().write_attributes(attributes, manufacturer)
    async def read_attributes_raw(self, attributes, manufacturer=None):
        """Intercept present_value reads: refresh the cache from the device."""
        if ATTR_PRESENT_VALUE in attributes or "present_value" in attributes:
            at_command = self._ep_id_2_pwm.get(self._endpoint.endpoint_id)
            result = await self._endpoint.device.remote_at(at_command)
            self._update_attribute(ATTR_PRESENT_VALUE, float(result))
        return await super().read_attributes_raw(attributes, manufacturer)
class XBeeRemoteATRequest(LocalDataCluster):
    """Remote AT Command Request Cluster.

    Out-cluster on endpoint 230 that frames and transmits Remote AT Command
    requests to the XBee.  Replies are matched up by frame id through the
    paired XBeeRemoteATResponse in-cluster on the same endpoint.
    """

    cluster_id = XBEE_AT_REQUEST_CLUSTER
    server_commands = {}
    _seq: int = 1  # frame id counter; advanced through 1..255, skipping 0

    class EUI64(t.EUI64):
        """EUI64 serialized in reversed byte order relative to zigpy's."""

        @classmethod
        def deserialize(cls, data):
            """Deserialize EUI64 (reverse the byte order)."""
            r, data = super().deserialize(data)
            return cls(r[::-1]), data

        def serialize(self):
            """Serialize EUI64 (reverse the byte order)."""
            assert self._length == len(self)
            return super().serialize()[::-1]

    class NWK(int):
        """16-bit network address, big-endian on the wire."""

        _signed = False
        _size = 2

        def serialize(self):
            """Serialize NWK."""
            return self.to_bytes(self._size, "big", signed=self._signed)

        @classmethod
        def deserialize(cls, data):
            """Deserialize NWK."""
            r = cls(int.from_bytes(data[: cls._size], "big", signed=cls._signed))
            data = data[cls._size :]
            return r, data

    def __init__(self, *args, **kwargs):
        """Generate client_commands from AT_COMMANDS.

        Command ids 1..N are assigned in AT_COMMANDS iteration order, one per
        AT command, each taking that command's parameter type.
        """
        super().__init__(*args, **kwargs)
        self.client_commands = {
            k: (v[0], (v[1],), None)
            for k, v in zip(range(1, len(AT_COMMANDS) + 1), AT_COMMANDS.items())
        }

    def _save_at_request(self, frame_id, future):
        # Register the pending future with the response cluster so the reply
        # with the same frame id resolves it.
        self._endpoint.in_clusters[XBEE_AT_RESPONSE_CLUSTER].save_at_request(
            frame_id, future
        )

    def remote_at_command(self, cmd_name, *args, apply_changes=True, **kwargs):
        """Execute a Remote AT Command and Return Response."""
        # Prefer the coordinator's native implementation (zigpy-xbee) when
        # available; otherwise frame the request ourselves below.
        if hasattr(self._endpoint.device.application, "remote_at_command"):
            return self._endpoint.device.application.remote_at_command(
                self._endpoint.device.nwk,
                cmd_name,
                *args,
                apply_changes=apply_changes,
                encryption=False,
                **kwargs,
            )
        _LOGGER.debug("Remote AT%s command: %s", cmd_name, args)
        options = uint8_t(0)
        if apply_changes:
            options |= 0x02  # "apply changes" option bit
        return self._remote_at_command(options, cmd_name, *args)

    async def _remote_at_command(self, options, name, *args):
        """Serialize the parameter, send the frame and await the reply."""
        _LOGGER.debug("Remote AT command: %s %s", name, args)
        data = t.serialize(args, (AT_COMMANDS[name],))
        try:
            # _command returns a future resolved by the response cluster;
            # bound the wait so a lost reply cannot hang the caller.
            return await asyncio.wait_for(
                await self._command(options, name.encode("ascii"), data, *args),
                timeout=REMOTE_AT_COMMAND_TIMEOUT,
            )
        except asyncio.TimeoutError:
            _LOGGER.warning("No response to %s command", name)
            raise

    async def _command(self, options, command, data, *args):
        """Build and transmit one request frame; return the pending future."""
        _LOGGER.debug("Command %s %s", command, data)
        frame_id = self._seq
        self._seq = (self._seq % 255) + 1
        schema = (
            uint8_t,
            uint8_t,
            uint8_t,
            uint8_t,
            self.EUI64,
            self.NWK,
            Bytes,
            Bytes,
        )
        # NOTE(review): the 0x32, 0x00 preamble is assumed to be the fixed
        # frame header expected by the XBee firmware — confirm against the
        # Digi API frame specification.
        data = t.serialize(
            (
                0x32,
                0x00,
                options,
                frame_id,
                self._endpoint.device.application.ieee,
                self._endpoint.device.application.nwk,
                command,
                data,
            ),
            schema,
        )
        result = await self._endpoint.device.application.request(
            self._endpoint.device,
            XBEE_PROFILE_ID,
            XBEE_AT_REQUEST_CLUSTER,
            XBEE_AT_ENDPOINT,
            XBEE_AT_ENDPOINT,
            self._endpoint.device.application.get_sequence(),
            data,
            expect_reply=False,
        )
        future = asyncio.Future()
        self._save_at_request(frame_id, future)
        if result[0] != foundation.Status.SUCCESS:
            future.set_exception(RuntimeError("AT Command request: {}".format(result)))
        return future

    async def command(
        self, command_id, *args, manufacturer=None, expect_reply=False, tsn=None
    ):
        """Handle AT request issued through the ZCL command interface."""
        command = self.client_commands[command_id][0]
        # Extract the optional single parameter; a dict means "no value"
        # (kwargs-style invocation with no positional payload).
        try:
            value = args[0]
            if isinstance(value, dict):
                value = None
        except IndexError:
            value = None
        if value:
            value = await self.remote_at_command(command, value)
        else:
            value = await self.remote_at_command(command)
        # Relay the AT response as an event through the endpoint-232 relay
        # cluster (the incoming tsn argument is deliberately replaced here).
        tsn = self._endpoint.device.application.get_sequence()
        hdr = foundation.ZCLHeader.cluster(tsn, command_id)
        self._endpoint.device.endpoints[232].out_clusters[
            LevelControl.cluster_id
        ].handle_cluster_request(hdr, value)
        return 0, foundation.Status.SUCCESS
class XBeeRemoteATResponse(LocalDataCluster):
    """Remote AT Command Response Cluster.

    In-cluster on endpoint 230: receives Remote AT Command responses and
    resolves the future registered for the matching frame id.
    """

    cluster_id = XBEE_AT_RESPONSE_CLUSTER
    # frame id -> (future,) of the pending request.
    # NOTE(review): class-level mutable dict — shared across all instances;
    # acceptable only if a single instance exists per device. Confirm.
    _awaiting = {}

    class ATCommandResult(enum.IntEnum):
        """AT command result codes (0 = success)."""

        OK = 0
        ERROR = 1
        INVALID_COMMAND = 2
        INVALID_PARAMETER = 3
        TX_FAILURE = 4

    class ATCommand(Bytes):
        """Two-character AT command name as raw bytes."""

        @classmethod
        def deserialize(cls, data):
            """Deserialize ATCommand: always exactly two bytes."""
            return cls(data[:2]), data[2:]

    def save_at_request(self, frame_id, future):
        """Save pending request so the response can resolve it."""
        self._awaiting[frame_id] = (future,)

    def handle_cluster_request(
        self,
        hdr: foundation.ZCLHeader,
        args: List[Any],
        *,
        dst_addressing: Optional[
            Union[t.Addressing.Group, t.Addressing.IEEE, t.Addressing.NWK]
        ] = None,
    ):
        """Handle AT response: resolve or fail the matching pending future."""
        if hdr.command_id == DATA_IN_CMD:
            frame_id = args[0]
            cmd = args[1]
            status = args[2]
            value = args[3]
            _LOGGER.debug(
                "Remote AT command response: %s", (frame_id, cmd, status, value)
            )
            (fut,) = self._awaiting.pop(frame_id)
            try:
                status = self.ATCommandResult(status)
            except ValueError:
                # Unknown status byte: treat as a generic error.
                status = self.ATCommandResult.ERROR
            if status:
                # Any non-zero status is a failure; propagate to the awaiter.
                fut.set_exception(
                    RuntimeError("AT Command response: {}".format(status.name))
                )
                return
            # Look up the declared response type; None means the command
            # carries no payload.
            response_type = AT_COMMANDS[cmd.decode("ascii")]
            if response_type is None or len(value) == 0:
                fut.set_result(None)
                return
            response, remains = response_type.deserialize(value)
            fut.set_result(response)
        else:
            super().handle_cluster_request(hdr, args)

    client_commands = {}
    server_commands = {
        0x0000: (
            "remote_at_response",
            (
                uint8_t,
                ATCommand,
                uint8_t,
                Bytes,
            ),
            None,
        )
    }
class XBeeCommon(CustomDevice):
    """Base quirk shared by XBee devices.

    Replaces the bare XBee endpoints with clusters for AT commands
    (endpoint 230) and IO samples / transparent serial data (endpoint 232).
    """

    def remote_at(self, command, *args, **kwargs):
        """Send a remote AT command via the endpoint-230 request cluster."""
        return (
            self.endpoints[230]
            .out_clusters[XBEE_AT_REQUEST_CLUSTER]
            .remote_at_command(command, *args, apply_changes=True, **kwargs)
        )

    def deserialize(self, endpoint_id, cluster_id, data):
        """Deserialize an incoming frame.

        XBee frames carry no ZCL header, so synthesize one (command 0x0000)
        before handing the data to the normal zigpy pipeline.
        """
        tsn = self._application.get_sequence()
        command_id = 0x0000
        hdr = foundation.ZCLHeader.cluster(tsn, command_id)
        data = hdr.serialize() + data
        return super().deserialize(endpoint_id, cluster_id, data)

    class DigitalIOCluster(LocalDataCluster, BinaryInput):
        """Digital IO Cluster for the XBee: parses periodic IO sample reports."""

        cluster_id = XBEE_IO_CLUSTER

        class IOSample(bytes):
            """Parse an XBee IO sample report."""

            # pylint: disable=R0201
            def serialize(self):
                """Not implemented: IO samples are only ever received."""
                _LOGGER.debug("Serialize not implemented.")

            @classmethod
            def deserialize(cls, data):
                """Deserialize an xbee IO sample report.

                Layout:
                  byte 0     sample set count (expected to be 1)
                  bytes 1-2  digital channel mask
                  byte 3     analog channel mask
                  bytes 4-5  digital samples (present only if any digital
                             channel is enabled)
                  then       2 bytes per enabled analog channel
                """
                sample_sets = int.from_bytes(data[0:1], byteorder="big")
                if sample_sets != 1:
                    _LOGGER.warning("Number of sets is not 1")
                digital_mask = data[1:3]
                analog_mask = data[3:4]
                digital_sample = data[4:6]
                num_bits = 13  # DIO0..DIO12
                # Expand the mask MSB-first, then reverse so index == pin number.
                digital_pins = [
                    (int.from_bytes(digital_mask, byteorder="big") >> bit) & 1
                    for bit in range(num_bits - 1, -1, -1)
                ]
                digital_pins = list(reversed(digital_pins))
                analog_pins = [
                    (int.from_bytes(analog_mask, byteorder="big") >> bit) & 1
                    for bit in range(8 - 1, -1, -1)
                ]
                analog_pins = list(reversed(analog_pins))
                if 1 in digital_pins:
                    digital_samples = [
                        (int.from_bytes(digital_sample, byteorder="big") >> bit) & 1
                        for bit in range(num_bits - 1, -1, -1)
                    ]
                    digital_samples = list(reversed(digital_samples))
                    sample_index = 6
                else:
                    # skip digital samples block (all zero anyway)
                    digital_samples = digital_pins
                    sample_index = 4
                analog_samples = []
                for apin in analog_pins:
                    if apin == 1:
                        analog_samples.append(
                            int.from_bytes(
                                data[sample_index : sample_index + 2], byteorder="big"
                            )
                        )
                        sample_index += 2
                    else:
                        # disabled channel: report 0
                        analog_samples.append(0)
                return (
                    {
                        "digital_pins": digital_pins,
                        "analog_pins": analog_pins,
                        "digital_samples": digital_samples,
                        "analog_samples": analog_samples,
                    },
                    data[sample_index:],
                )

        def handle_cluster_request(
            self,
            hdr: foundation.ZCLHeader,
            args: List[Any],
            *,
            dst_addressing: Optional[
                Union[t.Addressing.Group, t.Addressing.IEEE, t.Addressing.NWK]
            ] = None,
        ):
            """Handle the cluster request.

            Fan a parsed IO sample out to the per-pin endpoints: digital
            values into the OnOff cache, analog values into AnalogInput.
            """
            if hdr.command_id == ON_OFF_CMD:
                values = args[0]
                if "digital_pins" in values and "digital_samples" in values:
                    # Update digital inputs (endpoint 0xD0 + pin number)
                    active_pins = [
                        i for i, x in enumerate(values["digital_pins"]) if x == 1
                    ]
                    for pin in active_pins:
                        # pylint: disable=W0212
                        self._endpoint.device[0xD0 + pin].on_off._update_attribute(
                            ATTR_ON_OFF, values["digital_samples"][pin]
                        )
                if "analog_pins" in values and "analog_samples" in values:
                    # Update analog inputs
                    active_pins = [
                        i for i, x in enumerate(values["analog_pins"]) if x == 1
                    ]
                    for pin in active_pins:
                        # pylint: disable=W0212
                        # /10.23 scales the 10-bit ADC reading (0..1023) to a
                        # 0..100 range; pin 7 is the supply voltage in mV,
                        # converted to volts instead.
                        self._endpoint.device[
                            0xD0 + pin
                        ].analog_input._update_attribute(
                            ATTR_PRESENT_VALUE,
                            values["analog_samples"][pin]
                            / (10.23 if pin != 7 else 1000),  # supply voltage is in mV
                        )
            else:
                super().handle_cluster_request(hdr, args)

        attributes = {0x0055: ("present_value", t.Bool)}
        client_commands = {}
        server_commands = {0x0000: ("io_sample", (IOSample,), False)}

    # pylint: disable=too-many-ancestors
    class EventRelayCluster(EventableCluster, LocalDataCluster, LevelControl):
        """A cluster with cluster_id which is allowed to send events.

        Relays AT responses and serial data to Home Assistant as ZHA events.
        """

        attributes = {}
        client_commands = {}

        def __init__(self, *args, **kwargs):
            """Generate server_commands from AT_COMMANDS.

            One "<at>_command_response" event per AT command (ids 1..N),
            plus id 0 for transparent serial data.
            """
            super().__init__(*args, **kwargs)
            self.server_commands = {
                k: (v[0].lower() + "_command_response", (str,), None)
                for k, v in zip(range(1, len(AT_COMMANDS) + 1), AT_COMMANDS.items())
            }
            self.server_commands[0x0000] = ("receive_data", (str,), None)

    class SerialDataCluster(LocalDataCluster):
        """Serial Data Cluster for the XBee (transparent UART data)."""

        cluster_id = XBEE_DATA_CLUSTER
        ep_attribute = "xbee_serial_data"

        class BinaryString(str):
            """Class to parse and serialize binary data as string.

            latin1 maps bytes 0-255 one-to-one to code points, so arbitrary
            binary payloads round-trip losslessly through str.
            """

            def serialize(self):
                """Serialize string into bytes."""
                return bytes(self, encoding="latin1")

            @classmethod
            def deserialize(cls, data):
                """Interpret data as string (consumes the whole buffer)."""
                data = str(data, encoding="latin1")
                return (cls(data), b"")

        def command(
            self, command_id, *args, manufacturer=None, expect_reply=False, tsn=None
        ):
            """Handle outgoing data: send the payload raw on the data endpoint."""
            data = self.BinaryString(args[0]).serialize()
            return self._endpoint.device.application.request(
                self._endpoint.device,
                XBEE_PROFILE_ID,
                XBEE_DATA_CLUSTER,
                XBEE_DATA_ENDPOINT,
                XBEE_DATA_ENDPOINT,
                self._endpoint.device.application.get_sequence(),
                data,
                expect_reply=False,
            )

        def handle_cluster_request(
            self,
            hdr: foundation.ZCLHeader,
            args: List[Any],
            *,
            dst_addressing: Optional[
                Union[t.Addressing.Group, t.Addressing.IEEE, t.Addressing.NWK]
            ] = None,
        ):
            """Handle incoming data: relay it as a receive_data event."""
            if hdr.command_id == DATA_IN_CMD:
                self._endpoint.out_clusters[
                    LevelControl.cluster_id
                ].handle_cluster_request(hdr, args[0])
            else:
                super().handle_cluster_request(hdr, args)

        attributes = {}
        client_commands = {0x0000: ("send_data", (BinaryString,), None)}
        server_commands = {0x0000: ("receive_data", (BinaryString,), None)}

    # Endpoint/cluster layout zigpy substitutes for the bare XBee signature.
    replacement = {
        ENDPOINTS: {
            230: {
                INPUT_CLUSTERS: [XBeeRemoteATResponse],
                OUTPUT_CLUSTERS: [XBeeRemoteATRequest],
            },
            232: {
                INPUT_CLUSTERS: [DigitalIOCluster, SerialDataCluster, XBeeBasic],
                OUTPUT_CLUSTERS: [SerialDataCluster, EventRelayCluster],
            },
        },
        "manufacturer": "Digi",
    }
| 31.410448 | 87 | 0.55215 |
ace64d02305c05890bd586c9b4a08dcbb59482ad | 1,698 | py | Python | tests/model/test_model.py | spirali/k | 52dea07cfe32b62425d2c813334e967b59d176a4 | [
"MIT"
] | 2 | 2020-07-24T08:40:45.000Z | 2021-04-27T08:27:35.000Z | tests/model/test_model.py | spirali/k | 52dea07cfe32b62425d2c813334e967b59d176a4 | [
"MIT"
] | 6 | 2020-10-27T10:02:19.000Z | 2022-02-08T07:14:45.000Z | tests/model/test_model.py | spirali/k | 52dea07cfe32b62425d2c813334e967b59d176a4 | [
"MIT"
] | 3 | 2020-07-24T11:32:30.000Z | 2021-04-29T11:41:05.000Z | import numpy as np
from tensorflow import keras
from tensorflow.keras.layers import Dense
from kitt.dataloading import BatchLoader, ListDataLoader
from kitt.dataloading.preprocessing import Preprocessing, ScalePreprocessing
from kitt.dataloading.tf import KerasSequence
from kitt.model import ModelWrapper
def test_model_map_loader():
    """Model.map_loader must apply input (x2) and output (x3) preprocessing."""
    inputs = [np.array([i]) for i in range(5)]
    outputs = [np.array([i]) for i in range(5, 10)]
    source = ListDataLoader(list(zip(inputs, outputs)))

    class Model(ModelWrapper):
        def input_preprocessing(self) -> Preprocessing:
            return ScalePreprocessing(2.0)

        def output_preprocessing(self) -> Preprocessing:
            return ScalePreprocessing(3.0)

    mapped = list(Model().map_loader(source))
    expected = list(zip([x * 2 for x in inputs], [y * 3 for y in outputs]))
    assert mapped == expected
def test_model_parallel_train():
    """Smoke test: a wrapped model trains through a KerasSequence with
    multiprocessing workers enabled (no assertions; passing = no crash)."""
    a = [np.array([v]) for v in range(5)]
    b = [np.array([v]) for v in range(5, 10)]
    loader = ListDataLoader(list(zip(a, b)))
    # Batch pairs of samples and adapt to the keras Sequence interface.
    loader = KerasSequence(BatchLoader(loader, 2))
    class Model(ModelWrapper):
        def input_preprocessing(self) -> Preprocessing:
            return ScalePreprocessing(2.0)
        def output_preprocessing(self) -> Preprocessing:
            return ScalePreprocessing(3.0)
        def build_network(self) -> keras.Model:
            # Tiny network: enough to exercise fit(), fast to train.
            return keras.Sequential([Dense(50), Dense(2)])
        def compile(self, model: keras.Model):
            model.compile(optimizer="adam", loss="mse")
    model = Model()
    network = model.build()
    # workers=2 + use_multiprocessing exercises the parallel data pipeline.
    network.fit(loader, epochs=2, workers=2, use_multiprocessing=True)
| 32.037736 | 76 | 0.673145 |
ace64d255f3050ddba24f9e09badc3c66887bac4 | 112 | py | Python | Easy/first-non-repeating-character/solution_1/first-non-repeating-character.py | moisestech/algoexpert | 35953d3719afdea45a7fa364735a2ea10d2b9a9c | [
"MIT"
] | 1 | 2021-12-20T03:49:34.000Z | 2021-12-20T03:49:34.000Z | Easy/first-non-repeating-character/solution_1/first-non-repeating-character.py | moisestech/algoexpert | 35953d3719afdea45a7fa364735a2ea10d2b9a9c | [
"MIT"
] | null | null | null | Easy/first-non-repeating-character/solution_1/first-non-repeating-character.py | moisestech/algoexpert | 35953d3719afdea45a7fa364735a2ea10d2b9a9c | [
"MIT"
] | null | null | null | # SOLUTION 1
# First Non-Repeating Character
# Complexity
# Average: Time: | Space:
# Worst: Time: | Space: | 18.666667 | 31 | 0.669643 |
ace64d4f9a25424ce43fc2ca340eaf5e1bdfe725 | 4,753 | py | Python | configs/centernet/centernet_segm_r18_1x_centerfpn.py | sokunmin/mmdetection | 2d8ef6ad36dae912dba71f83934e7dd5a0ced3eb | [
"Apache-2.0"
] | null | null | null | configs/centernet/centernet_segm_r18_1x_centerfpn.py | sokunmin/mmdetection | 2d8ef6ad36dae912dba71f83934e7dd5a0ced3eb | [
"Apache-2.0"
] | null | null | null | configs/centernet/centernet_segm_r18_1x_centerfpn.py | sokunmin/mmdetection | 2d8ef6ad36dae912dba71f83934e7dd5a0ced3eb | [
"Apache-2.0"
] | null | null | null | _base_ = [
'../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]
dataset_type = 'CocoPersonDataset'
data_root = 'data/coco/'
# model settings
model = dict(
type='CenterNet',
pretrained='torchvision://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=False,
zero_init_residual=False,
style='pytorch'),
neck=dict(
type='CenterFPN',
in_channels=(512, 256, 128, 64),
out_channels=64,
level_index=0,
reverse_levels=True,
with_last_norm=True,
with_last_relu=True,
upsample_cfg=dict(
type='deconv',
kernel_size=4,
stride=2,
padding=1,
output_padding=0,
bias=False)),
bbox_head=dict(
type='CenterHead',
num_classes=1,
in_channels=64,
feat_channels=64,
loss_heatmap=dict(
type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
loss_offset=dict(type='L1Loss', loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=0.1)),
mask_head=dict(
type='CenterMaskHead',
num_classes=1,
in_channels=64,
feat_channels=64,
saliency_channels=1,
shape_channels=576, # 576: 24x24, 1024: 32x32
loss_mask=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
min_overlap=0.7,
debug=False)
test_cfg = dict(
score_thr=0.01,
mask_score_thr=0.4,
max_per_img=100)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations',
with_bbox=True,
with_mask=True),
dict(type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(type='RandomLighting', scale=0.1),
dict(type='RandomCenterCropPad',
crop_size=(512, 512),
ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
test_mode=False,
test_pad_mode=None,
with_mask2bbox=True,
**img_norm_cfg),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Pad', size_divisor=32),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
]
test_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=False,
transforms=[
dict(type='Resize'),
dict(type='RandomFlip'),
dict(type='Pad', size_divisor=32),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(
type='Collect',
keys=['img'],
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg')),
])
]
classes = ('person',)
data = dict(
samples_per_gpu=32,
workers_per_gpu=2,
train=dict(
type=dataset_type,
classes=classes,
ann_file=data_root + 'annotations/person_keypoints_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
classes=classes,
ann_file=data_root + 'annotations/person_keypoints_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
classes=classes,
ann_file=data_root + 'annotations/person_keypoints_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=1.0 / 5,
step=[120])
checkpoint_config = dict(interval=5)
evaluation = dict(interval=1, metric=['bbox', 'segm'], multitask=True)
# runtime settings
total_epochs = 130
cudnn_benchmark = True
find_unused_parameters = True
log_config = dict(
interval=5,
hooks=[
dict(type='TextLoggerHook'),
dict(type='TensorboardImageHook'),
]) | 30.863636 | 77 | 0.60446 |
ace64e21006c14e8a966aa0fd5cc78a8efa23c79 | 2,503 | py | Python | madsenlab/axelrod/analysis/order_parameters.py | mmadsen/axelrod-ct | 90ea4319dd571546888c4d2a50255514e7d7fb94 | [
"Apache-2.0"
] | 5 | 2015-05-03T08:49:11.000Z | 2022-03-23T11:44:00.000Z | madsenlab/axelrod/analysis/order_parameters.py | mmadsen/axelrod-ct | 90ea4319dd571546888c4d2a50255514e7d7fb94 | [
"Apache-2.0"
] | null | null | null | madsenlab/axelrod/analysis/order_parameters.py | mmadsen/axelrod-ct | 90ea4319dd571546888c4d2a50255514e7d7fb94 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2013. Mark E. Madsen <mark@madsenlab.org>
#
# This work is licensed under the terms of the Apache Software License, Version 2.0. See the file LICENSE for details.
"""
Description here
"""
import overlap as o
import logging as log
import numpy as np
def klemm_normalized_L_axelrod(pop,simconfig):
    """
    Normalized Lyapunov potential from Klemm et al. 2003, Physica A (327) 1-5,
    Equation (1), for the classic Axelrod model.

    Sums (F - overlap) over every edge of the population graph and scales by
    2 / (z * N * F).  The result lies in [0, 1]; 0 is reached only for a
    completely homogeneous configuration.  Local variable names here differ
    from the Klemm notation used elsewhere in the codebase.
    """
    graph = pop.agentgraph
    num_agents = simconfig.popsize
    coord_num = pop.get_coordination_number()
    num_features = simconfig.num_features
    #log.debug("z: %s F: %s N: %s", coord_num, num_features, num_agents)
    total = 0
    for (node_a, node_b) in graph.edges_iter():
        (_, traits_a) = pop.get_agent_by_id(node_a)
        (_, traits_b) = pop.get_agent_by_id(node_b)
        total += num_features - o.calc_overlap_axelrod(traits_a, traits_b)
    return (2.0 / (coord_num * num_agents * num_features)) * total
def klemm_normalized_L_extensible(pop, simconfig):
    """
    Normalized Lyapunov potential from Klemm et al. 2003, Physica A (327) 1-5,
    Equation (1), adapted to the extensible-trait model.

    Same structure as the Axelrod variant, except that F is not a fixed
    configuration value: it is taken as the maximum trait-repertoire size
    currently present in the population.  Result lies in [0, 1]; 0 only for
    completely homogeneous configurations.
    """
    graph = pop.agentgraph
    num_agents = simconfig.popsize
    coord_num = pop.get_coordination_number()
    # F = largest trait set held by any agent right now
    repertoire_sizes = [len(graph.node[name]['traits']) for name in graph.nodes()]
    max_traits = np.amax(np.asarray(repertoire_sizes))
    total = 0
    for (node_a, node_b) in graph.edges_iter():
        (_, traits_a) = pop.get_agent_by_id(node_a)
        (_, traits_b) = pop.get_agent_by_id(node_b)
        total += max_traits - o.calc_overlap_extensible(traits_a, traits_b)
    return (2.0 / (coord_num * num_agents * max_traits)) * total
ace6500a6f4d3b32a92f5bdfa0474cf18a32378b | 210 | py | Python | part25.26/blog/admin.py | yllew36/WellyGI | 7d53fac4c81bb994f61b22761e5ac7e48994ade4 | [
"Apache-2.0"
] | 1 | 2019-11-15T08:02:45.000Z | 2019-11-15T08:02:45.000Z | part28/blog/admin.py | yllew36/WellyGI | 7d53fac4c81bb994f61b22761e5ac7e48994ade4 | [
"Apache-2.0"
] | null | null | null | part28/blog/admin.py | yllew36/WellyGI | 7d53fac4c81bb994f61b22761e5ac7e48994ade4 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import Post
class PostAdmin(admin.ModelAdmin):
readonly_fields = ['slug','publish','update']
admin.site.register(Post, PostAdmin)
| 19.090909 | 46 | 0.766667 |
ace6504ffe54335e0add333b0e401dd9793fd754 | 18,205 | py | Python | django/templatetags/i18n.py | jedie/django | 09f2cdbe1a43e79e31f5ea509b59d4c87db29832 | [
"BSD-3-Clause"
] | null | null | null | django/templatetags/i18n.py | jedie/django | 09f2cdbe1a43e79e31f5ea509b59d4c87db29832 | [
"BSD-3-Clause"
] | null | null | null | django/templatetags/i18n.py | jedie/django | 09f2cdbe1a43e79e31f5ea509b59d4c87db29832 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import sys
from django.conf import settings
from django.template import Library, Node, TemplateSyntaxError, Variable
from django.template.base import TOKEN_TEXT, TOKEN_VAR, render_value_in_context
from django.template.defaulttags import token_kwargs
from django.utils import six, translation
register = Library()
class GetAvailableLanguagesNode(Node):
    """Template node that stores ``settings.LANGUAGES`` — as (code,
    translated name) pairs — under the given context variable."""

    def __init__(self, variable):
        self.variable = variable

    def render(self, context):
        languages = []
        for code, name in settings.LANGUAGES:
            languages.append((code, translation.ugettext(name)))
        context[self.variable] = languages
        return ''
class GetLanguageInfoNode(Node):
    """Template node that resolves a language-code expression and stores the
    corresponding language-info dict under the given context variable."""

    def __init__(self, lang_code, variable):
        self.lang_code = lang_code
        self.variable = variable

    def render(self, context):
        code = self.lang_code.resolve(context)
        info = translation.get_language_info(code)
        context[self.variable] = info
        return ''
class GetLanguageInfoListNode(Node):
    """Template node that stores a list of language-info dicts, one per entry
    of the resolved ``languages`` sequence, under the given context variable."""

    def __init__(self, languages, variable):
        self.languages = languages
        self.variable = variable

    def get_language_info(self, language):
        # Each entry is either a bare language-code string or a sequence
        # (e.g. a settings.LANGUAGES pair) whose first item is the code.
        first = language[0]
        if len(first) > 1:
            return translation.get_language_info(first)
        return translation.get_language_info(str(language))

    def render(self, context):
        entries = self.languages.resolve(context)
        infos = [self.get_language_info(entry) for entry in entries]
        context[self.variable] = infos
        return ''
class GetCurrentLanguageNode(Node):
    """Template node that stores the currently active language code under
    the given context variable."""

    def __init__(self, variable):
        self.variable = variable

    def render(self, context):
        current = translation.get_language()
        context[self.variable] = current
        return ''
class GetCurrentLanguageBidiNode(Node):
    """Template node that stores whether the active language is written
    right-to-left (True) under the given context variable."""

    def __init__(self, variable):
        self.variable = variable

    def render(self, context):
        is_bidi = translation.get_language_bidi()
        context[self.variable] = is_bidi
        return ''
class TranslateNode(Node):
    """Renderer for the {% trans %} tag: resolves and translates a single
    string, optionally with a message context, a ``noop`` switch, and an
    ``as var`` target instead of direct output."""
    def __init__(self, filter_expression, noop, asvar=None,
                 message_context=None):
        self.noop = noop
        self.asvar = asvar
        self.message_context = message_context
        self.filter_expression = filter_expression
        # A bare string literal is wrapped in a Variable so it flows through
        # the same resolution machinery as a template variable would.
        if isinstance(self.filter_expression.var, six.string_types):
            self.filter_expression.var = Variable("'%s'" %
                                                  self.filter_expression.var)

    def render(self, context):
        # ``noop`` marks the string for extraction but skips translating it.
        self.filter_expression.var.translate = not self.noop
        if self.message_context:
            self.filter_expression.var.message_context = (
                self.message_context.resolve(context))
        output = self.filter_expression.resolve(context)
        value = render_value_in_context(output, context)
        if self.asvar:
            # {% trans ... as var %}: store the result, emit nothing.
            context[self.asvar] = value
            return ''
        else:
            return value
class BlockTranslateNode(Node):
    """Renderer for {% blocktrans %}: a translatable block of template text
    with %(name)s placeholders, optional pluralization (``count``), message
    context, extra ``with`` bindings and whitespace trimming."""

    def __init__(self, extra_context, singular, plural=None, countervar=None,
                 counter=None, message_context=None, trimmed=False):
        self.extra_context = extra_context
        self.singular = singular
        self.plural = plural
        self.countervar = countervar
        self.counter = counter
        self.message_context = message_context
        self.trimmed = trimmed

    def render_token_list(self, tokens):
        # Build the gettext msgid from the raw template tokens: literal text
        # is kept (with '%' escaped for the later %-formatting pass) and each
        # variable token becomes a '%(name)s' placeholder.  Returns the msgid
        # and the list of placeholder names encountered.
        result = []
        vars = []
        for token in tokens:
            if token.token_type == TOKEN_TEXT:
                result.append(token.contents.replace('%', '%%'))
            elif token.token_type == TOKEN_VAR:
                result.append('%%(%s)s' % token.contents)
                vars.append(token.contents)
        msg = ''.join(result)
        if self.trimmed:
            msg = translation.trim_whitespace(msg)
        return msg, vars

    def render(self, context, nested=False):
        if self.message_context:
            message_context = self.message_context.resolve(context)
        else:
            message_context = None
        tmp_context = {}
        for var, val in self.extra_context.items():
            tmp_context[var] = val.resolve(context)
        # Update() works like a push(), so corresponding context.pop() is at
        # the end of function
        context.update(tmp_context)
        singular, vars = self.render_token_list(self.singular)
        if self.plural and self.countervar and self.counter:
            # Pluralized form: pick singular/plural via (n)(p)gettext.
            count = self.counter.resolve(context)
            context[self.countervar] = count
            plural, plural_vars = self.render_token_list(self.plural)
            if message_context:
                result = translation.npgettext(message_context, singular,
                                               plural, count)
            else:
                result = translation.ungettext(singular, plural, count)
            vars.extend(plural_vars)
        else:
            if message_context:
                result = translation.pgettext(message_context, singular)
            else:
                result = translation.ugettext(singular)
        default_value = context.template.engine.string_if_invalid

        def render_value(key):
            # Resolve a placeholder; missing variables fall back to the
            # engine's string_if_invalid (which may itself contain '%s').
            if key in context:
                val = context[key]
            else:
                val = default_value % key if '%s' in default_value else default_value
            return render_value_in_context(val, context)

        data = {v: render_value(v) for v in vars}
        context.pop()
        try:
            result = result % data
        except (KeyError, ValueError):
            if nested:
                # Either string is malformed, or it's a bug
                raise TemplateSyntaxError("'blocktrans' is unable to format "
                    "string returned by gettext: %r using %r" % (result, data))
            # The translated string did not match the placeholders; fall back
            # to rendering untranslated (language deactivated) once.
            with translation.override(None):
                result = self.render(context, nested=True)
        return result
class LanguageNode(Node):
    """Template node that renders its enclosed node list with the given
    language activated for the duration of the render."""

    def __init__(self, nodelist, language):
        self.nodelist = nodelist
        self.language = language

    def render(self, context):
        lang = self.language.resolve(context)
        with translation.override(lang):
            return self.nodelist.render(context)
@register.tag("get_available_languages")
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your setting file (or the default settings) and
put it into the named variable.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
@register.tag("get_language_info")
def do_get_language_info(parser, token):
"""
This will store the language information dictionary for the given language
code in a context variable.
Usage::
{% get_language_info for LANGUAGE_CODE as l %}
{{ l.code }}
{{ l.name }}
{{ l.name_translated }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
"""
args = token.split_contents()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoNode(parser.compile_filter(args[2]), args[4])
@register.tag("get_language_info_list")
def do_get_language_info_list(parser, token):
"""
This will store a list of language information dictionaries for the given
language codes in a context variable. The language codes can be specified
either as a list of strings or a settings.LANGUAGES style list (or any
sequence of sequences whose first items are language codes).
Usage::
{% get_language_info_list for LANGUAGES as langs %}
{% for l in langs %}
{{ l.code }}
{{ l.name }}
{{ l.name_translated }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
{% endfor %}
"""
args = token.split_contents()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoListNode(parser.compile_filter(args[2]), args[4])
@register.filter
def language_name(lang_code):
    """Return the English name for the given language code."""
    info = translation.get_language_info(lang_code)
    return info['name']


@register.filter
def language_name_translated(lang_code):
    """Return the language name translated into the active language."""
    info = translation.get_language_info(lang_code)
    return translation.ugettext(info['name'])


@register.filter
def language_name_local(lang_code):
    """Return the language name in the language itself."""
    info = translation.get_language_info(lang_code)
    return info['name_local']


@register.filter
def language_bidi(lang_code):
    """Return True if the given language is written right-to-left."""
    info = translation.get_language_info(lang_code)
    return info['bidi']
@register.tag("get_current_language")
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
put it's value into the ``language`` context
variable.
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
@register.tag("get_current_language_bidi")
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
put it's value into the ``bidi`` context variable.
True indicates right-to-left layout, otherwise left-to-right
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
@register.tag("trans")
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
be pulled out by mark-messages.py into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
It is possible to store the translated string into a variable::
{% trans "this is a test" as var %}
{{ var }}
Contextual translations are also supported::
{% trans "this is a test" context "greeting" %}
This is equivalent to calling pgettext instead of (u)gettext.
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument" % bits[0])
message_string = parser.compile_filter(bits[1])
remaining = bits[2:]
noop = False
asvar = None
message_context = None
seen = set()
invalid_context = {'as', 'noop'}
while remaining:
option = remaining.pop(0)
if option in seen:
raise TemplateSyntaxError(
"The '%s' option was specified more than once." % option,
)
elif option == 'noop':
noop = True
elif option == 'context':
try:
value = remaining.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the context option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
if value in invalid_context:
raise TemplateSyntaxError(
"Invalid argument '%s' provided to the '%s' tag for the context option" % (value, bits[0]),
)
message_context = parser.compile_filter(value)
elif option == 'as':
try:
value = remaining.pop(0)
except IndexError:
msg = "No argument provided to the '%s' tag for the as option." % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
asvar = value
else:
raise TemplateSyntaxError(
"Unknown argument for '%s' tag: '%s'. The only options "
"available are 'noop', 'context' \"xxx\", and 'as VAR'." % (
bits[0], option,
)
)
seen.add(option)
return TranslateNode(message_string, noop, asvar, message_context)
@register.tag("blocktrans")
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with bar=foo|filter boo=baz|filter %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count count=var|length %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
The "var as value" legacy format is still supported::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
{% blocktrans count var|length as count %}
Contextual translations are supported::
{% blocktrans with bar=foo|filter context "greeting" %}
This is {{ bar }}.
{% endblocktrans %}
This is equivalent to calling pgettext/npgettext instead of
(u)gettext/(u)ngettext.
"""
bits = token.split_contents()
options = {}
remaining_bits = bits[1:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'count':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if len(value) != 1:
raise TemplateSyntaxError('"count" in %r tag expected exactly '
'one keyword argument.' % bits[0])
elif option == "context":
try:
value = remaining_bits.pop(0)
value = parser.compile_filter(value)
except Exception:
msg = (
'"context" in %r tag expected '
'exactly one argument.') % bits[0]
six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
elif option == "trimmed":
value = True
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
if 'count' in options:
countervar, counter = list(options['count'].items())[0]
else:
countervar, counter = None, None
if 'context' in options:
message_context = options['context']
else:
message_context = None
extra_context = options.get('with', {})
trimmed = options.get("trimmed", False)
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
return BlockTranslateNode(extra_context, singular, plural, countervar,
counter, message_context, trimmed=trimmed)
@register.tag
def language(parser, token):
    """
    Enable the given language just for the enclosed block.

    Usage::

        {% language "de" %}
        This is {{ bar }} and {{ boo }}.
        {% endlanguage %}
    """
    bits = token.split_contents()
    if len(bits) != 2:
        raise TemplateSyntaxError("'%s' takes one argument (language)" % bits[0])
    lang_expr = parser.compile_filter(bits[1])
    body = parser.parse(('endlanguage',))
    # Drop the {% endlanguage %} token itself.
    parser.delete_first_token()
    return LanguageNode(body, lang_expr)
| 34.284369 | 117 | 0.618676 |
ace652030e8f8e3a7c306634fe145587617a63d7 | 31,243 | py | Python | pypureclient/flasharray/FA_2_9/api/protection_group_snapshots_api.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 14 | 2018-12-07T18:30:27.000Z | 2022-02-22T09:12:33.000Z | pypureclient/flasharray/FA_2_9/api/protection_group_snapshots_api.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 28 | 2019-09-17T21:03:52.000Z | 2022-03-29T22:07:35.000Z | pypureclient/flasharray/FA_2_9/api/protection_group_snapshots_api.py | Flav-STOR-WL/py-pure-client | 03b889c997d90380ac5d6380ca5d5432792d3e89 | [
"BSD-2-Clause"
] | 15 | 2020-06-11T15:50:08.000Z | 2022-03-21T09:27:25.000Z | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.9
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re
# python 2 and python 3 compatibility library
import six
from typing import List, Optional
from .. import models
class ProtectionGroupSnapshotsApi(object):
    def __init__(self, api_client):
        # Shared ApiClient instance used to issue every HTTP request made by
        # the endpoint methods of this class.
        self.api_client = api_client
    def api29_protection_group_snapshots_delete_with_http_info(
        self,
        authorization=None,  # type: str
        x_request_id=None,  # type: str
        names=None,  # type: List[str]
        async_req=False,  # type: bool
        _return_http_data_only=False,  # type: bool
        _preload_content=True,  # type: bool
        _request_timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> None
        """Delete a protection group snapshot

        Deletes a protection group snapshot that has been destroyed and is pending eradication. Eradicating a protection group snapshot eradicates all of its protection group snapshots. Eradicated protection group snapshots cannot be recovered. Protection group snapshots are destroyed through the `PATCH` method. The `ids` or `names` parameter is required, but cannot be set together.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.api29_protection_group_snapshots_delete_with_http_info(async_req=True)
        >>> result = thread.get()

        :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
        :param str x_request_id: Supplied by client during request or generated by server.
        :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
        :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
        :param bool _return_http_data_only: Returns only data field.
        :param bool _preload_content: Response is converted into objects.
        :param int _request_timeout: Total request timeout in seconds.
                                     It can also be a tuple of (connection time, read time) timeouts.
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accept a scalar for the list-valued parameter by wrapping it.
        if names is not None:
            if not isinstance(names, list):
                names = [names]
        # Snapshot all non-None arguments (generated-client convention).
        params = {k: v for k, v in six.iteritems(locals()) if v is not None}

        # Convert the filter into a string
        if params.get('filter'):
            params['filter'] = str(params['filter'])
        if params.get('sort'):
            params['sort'] = [str(_x) for _x in params['sort']]

        collection_formats = {}
        path_params = {}

        # Build query string; 'names' is serialized comma-separated.
        query_params = []
        if 'names' in params:
            query_params.append(('names', params['names']))
            collection_formats['names'] = 'csv'

        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']
        if 'x_request_id' in params:
            header_params['X-Request-ID'] = params['x_request_id']

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json'])

        # Authentication setting
        auth_settings = []

        # Delegate to the shared client; DELETE has no response body type.
        return self.api_client.call_api(
            '/api/2.9/protection-group-snapshots', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,
            auth_settings=auth_settings,
            async_req=async_req,
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats=collection_formats,
        )
    def api29_protection_group_snapshots_get_with_http_info(
        self,
        authorization=None,  # type: str
        x_request_id=None,  # type: str
        continuation_token=None,  # type: str
        destroyed=None,  # type: bool
        filter=None,  # type: str
        limit=None,  # type: int
        names=None,  # type: List[str]
        offset=None,  # type: int
        sort=None,  # type: List[str]
        source_names=None,  # type: List[str]
        total_item_count=None,  # type: bool
        total_only=None,  # type: bool
        async_req=False,  # type: bool
        _return_http_data_only=False,  # type: bool
        _preload_content=True,  # type: bool
        _request_timeout=None,  # type: Optional[int]
    ):
        # type: (...) -> models.ProtectionGroupSnapshotGetResponse
        """List protection group snapshots

        Displays a list of protection group snapshots, including those pending eradication.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.api29_protection_group_snapshots_get_with_http_info(async_req=True)
        >>> result = thread.get()

        :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
        :param str x_request_id: Supplied by client during request or generated by server.
        :param str continuation_token: A token used to retrieve the next page of data with some consistency guaranteed. The token is a Base64 encoded value. Set `continuation_token` to the system-generated token taken from the `x-next-token` header field of the response. A query has reached its last page when the response does not include a token. Pagination requires the `limit` and `continuation_token` query parameters.
        :param bool destroyed: If set to `true`, lists only destroyed objects that are in the eradication pending state. If set to `false`, lists only objects that are not destroyed. For destroyed objects, the time remaining is displayed in milliseconds.
        :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
        :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
        :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
        :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
        :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
        :param list[str] source_names: Performs the operation on the source name specified. Enter multiple source names in comma-separated format. For example, `name01,name02`.
        :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
        :param bool total_only: If set to `true`, returns the aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
        :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
        :param bool _return_http_data_only: Returns only data field.
        :param bool _preload_content: Response is converted into objects.
        :param int _request_timeout: Total request timeout in seconds.
                                     It can also be a tuple of (connection time, read time) timeouts.
        :return: ProtectionGroupSnapshotGetResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accept scalars for the list-valued parameters by wrapping them.
        if names is not None:
            if not isinstance(names, list):
                names = [names]
        if sort is not None:
            if not isinstance(sort, list):
                sort = [sort]
        if source_names is not None:
            if not isinstance(source_names, list):
                source_names = [source_names]
        # Snapshot all non-None arguments (generated-client convention).
        params = {k: v for k, v in six.iteritems(locals()) if v is not None}

        # Convert the filter into a string
        if params.get('filter'):
            params['filter'] = str(params['filter'])
        if params.get('sort'):
            params['sort'] = [str(_x) for _x in params['sort']]

        # Client-side range validation mirroring the API constraints.
        if 'limit' in params and params['limit'] < 1:
            raise ValueError("Invalid value for parameter `limit` when calling `api29_protection_group_snapshots_get`, must be a value greater than or equal to `1`")
        if 'offset' in params and params['offset'] < 0:
            raise ValueError("Invalid value for parameter `offset` when calling `api29_protection_group_snapshots_get`, must be a value greater than or equal to `0`")

        collection_formats = {}
        path_params = {}

        # Build the query string; list parameters serialize comma-separated.
        query_params = []
        if 'continuation_token' in params:
            query_params.append(('continuation_token', params['continuation_token']))
        if 'destroyed' in params:
            query_params.append(('destroyed', params['destroyed']))
        if 'filter' in params:
            query_params.append(('filter', params['filter']))
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'names' in params:
            query_params.append(('names', params['names']))
            collection_formats['names'] = 'csv'
        if 'offset' in params:
            query_params.append(('offset', params['offset']))
        if 'sort' in params:
            query_params.append(('sort', params['sort']))
            collection_formats['sort'] = 'csv'
        if 'source_names' in params:
            query_params.append(('source_names', params['source_names']))
            collection_formats['source_names'] = 'csv'
        if 'total_item_count' in params:
            query_params.append(('total_item_count', params['total_item_count']))
        if 'total_only' in params:
            query_params.append(('total_only', params['total_only']))

        header_params = {}
        if 'authorization' in params:
            header_params['Authorization'] = params['authorization']
        if 'x_request_id' in params:
            header_params['X-Request-ID'] = params['x_request_id']

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(
            ['application/json'])

        # Authentication setting
        auth_settings = []

        # Delegate to the shared client; response deserialized into
        # ProtectionGroupSnapshotGetResponse unless _preload_content=False.
        return self.api_client.call_api(
            '/api/2.9/protection-group-snapshots', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ProtectionGroupSnapshotGetResponse',
            auth_settings=auth_settings,
            async_req=async_req,
            _return_http_data_only=_return_http_data_only,
            _preload_content=_preload_content,
            _request_timeout=_request_timeout,
            collection_formats=collection_formats,
        )
def api29_protection_group_snapshots_patch_with_http_info(
    self,
    protection_group_snapshot=None,  # type: models.ProtectionGroupSnapshotPatch
    authorization=None,  # type: str
    x_request_id=None,  # type: str
    names=None,  # type: List[str]
    async_req=False,  # type: bool
    _return_http_data_only=False,  # type: bool
    _preload_content=True,  # type: bool
    _request_timeout=None,  # type: Optional[int]
):
    # type: (...) -> models.ProtectionGroupSnapshotResponse
    """Modify a protection group snapshot

    Modifies a protection group snapshot so that it can be destroyed. To destroy a volume, set `destroyed=true`. To recover a volume that has been destroyed and is pending eradication, set `destroyed=false`. The `names` parameter is required.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.api29_protection_group_snapshots_patch_with_http_info(protection_group_snapshot, async_req=True)
    >>> result = thread.get()

    :param ProtectionGroupSnapshotPatch protection_group_snapshot: (required)
    :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
    :param str x_request_id: Supplied by client during request or generated by server.
    :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
    :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
    :param bool _return_http_data_only: Returns only data field.
    :param bool _preload_content: Response is converted into objects.
    :param int _request_timeout: Total request timeout in seconds.
                                 It can also be a tuple of (connection time, read time) timeouts.
    :return: ProtectionGroupSnapshotResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Coerce a scalar `names` argument into a list so it can be serialized
    # in csv collection format below.
    if names is not None:
        if not isinstance(names, list):
            names = [names]
    # Snapshot every non-None local as a candidate request parameter.
    # NOTE: because this reads locals(), no new local variable may be
    # introduced above this line without it leaking into the request.
    params = {k: v for k, v in six.iteritems(locals()) if v is not None}
    # Convert the filter into a string
    # (generated-template boilerplate: this endpoint takes no `filter` or
    # `sort` arguments, so these two branches never fire here).
    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(_x) for _x in params['sort']]
    # verify the required parameter 'protection_group_snapshot' is set
    if protection_group_snapshot is None:
        raise TypeError("Missing the required parameter `protection_group_snapshot` when calling `api29_protection_group_snapshots_patch`")

    collection_formats = {}
    path_params = {}

    query_params = []
    if 'names' in params:
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'

    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']
    if 'x_request_id' in params:
        header_params['X-Request-ID'] = params['x_request_id']

    form_params = []
    local_var_files = {}

    # The PATCH payload is the snapshot model itself.
    body_params = None
    if 'protection_group_snapshot' in params:
        body_params = params['protection_group_snapshot']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(
        '/api/2.9/protection-group-snapshots', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ProtectionGroupSnapshotResponse',
        auth_settings=auth_settings,
        async_req=async_req,
        _return_http_data_only=_return_http_data_only,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
        collection_formats=collection_formats,
    )
def api29_protection_group_snapshots_post_with_http_info(
    self,
    authorization=None,  # type: str
    x_request_id=None,  # type: str
    apply_retention=None,  # type: bool
    for_replication=None,  # type: bool
    replicate=None,  # type: bool
    replicate_now=None,  # type: bool
    source_names=None,  # type: List[str]
    protection_group_snapshot=None,  # type: models.ProtectionGroupSnapshotPost
    async_req=False,  # type: bool
    _return_http_data_only=False,  # type: bool
    _preload_content=True,  # type: bool
    _request_timeout=None,  # type: Optional[int]
):
    # type: (...) -> models.ProtectionGroupSnapshotResponse
    """Create a protection group snapshot

    Creates a point-in-time snapshot of the contents of a protection group. The `source_ids` or `source_names` parameter is required, but cannot be set together.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.api29_protection_group_snapshots_post_with_http_info(async_req=True)
    >>> result = thread.get()

    :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
    :param str x_request_id: Supplied by client during request or generated by server.
    :param bool apply_retention: If `true`, applies the local and remote retention policy to the snapshots.
    :param bool for_replication: If `true`, destroys and eradicates the snapshot after 1 hour.
    :param bool replicate: If set to `true`, queues up and begins replicating to each allowed target after all earlier replication sessions for the same protection group have been completed to that target. The `replicate` and `replicate_now` parameters cannot be used together.
    :param bool replicate_now: If set to `true`, replicates the snapshots to each allowed target. The `replicate` and `replicate_now` parameters cannot be used together.
    :param list[str] source_names: Performs the operation on the source name specified. Enter multiple source names in comma-separated format. For example, `name01,name02`.
    :param ProtectionGroupSnapshotPost protection_group_snapshot:
    :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
    :param bool _return_http_data_only: Returns only data field.
    :param bool _preload_content: Response is converted into objects.
    :param int _request_timeout: Total request timeout in seconds.
                                 It can also be a tuple of (connection time, read time) timeouts.
    :return: ProtectionGroupSnapshotResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Coerce a scalar `source_names` argument into a list so it can be
    # serialized in csv collection format below.
    if source_names is not None:
        if not isinstance(source_names, list):
            source_names = [source_names]
    # Snapshot every non-None local as a candidate request parameter.
    # NOTE: because this reads locals(), no new local variable may be
    # introduced above this line without it leaking into the request.
    params = {k: v for k, v in six.iteritems(locals()) if v is not None}
    # Convert the filter into a string
    # (generated-template boilerplate: this endpoint takes no `filter` or
    # `sort` arguments, so these two branches never fire here).
    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(_x) for _x in params['sort']]

    collection_formats = {}
    path_params = {}

    query_params = []
    if 'apply_retention' in params:
        query_params.append(('apply_retention', params['apply_retention']))
    if 'for_replication' in params:
        query_params.append(('for_replication', params['for_replication']))
    if 'replicate' in params:
        query_params.append(('replicate', params['replicate']))
    if 'replicate_now' in params:
        query_params.append(('replicate_now', params['replicate_now']))
    if 'source_names' in params:
        query_params.append(('source_names', params['source_names']))
        collection_formats['source_names'] = 'csv'

    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']
    if 'x_request_id' in params:
        header_params['X-Request-ID'] = params['x_request_id']

    form_params = []
    local_var_files = {}

    # The POST payload is the (optional) snapshot model.
    body_params = None
    if 'protection_group_snapshot' in params:
        body_params = params['protection_group_snapshot']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(
        '/api/2.9/protection-group-snapshots', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ProtectionGroupSnapshotResponse',
        auth_settings=auth_settings,
        async_req=async_req,
        _return_http_data_only=_return_http_data_only,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
        collection_formats=collection_formats,
    )
def api29_protection_group_snapshots_transfer_get_with_http_info(
    self,
    authorization=None,  # type: str
    x_request_id=None,  # type: str
    destroyed=None,  # type: bool
    filter=None,  # type: str
    limit=None,  # type: int
    names=None,  # type: List[str]
    offset=None,  # type: int
    sort=None,  # type: List[str]
    source_names=None,  # type: List[str]
    total_item_count=None,  # type: bool
    total_only=None,  # type: bool
    async_req=False,  # type: bool
    _return_http_data_only=False,  # type: bool
    _preload_content=True,  # type: bool
    _request_timeout=None,  # type: Optional[int]
):
    # type: (...) -> models.ProtectionGroupSnapshotTransferGetResponse
    """List protection group snapshots with transfer statistics

    Returns a list of protection group snapshots and their transfer statistics.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.api29_protection_group_snapshots_transfer_get_with_http_info(async_req=True)
    >>> result = thread.get()

    :param str authorization: Access token (in JWT format) required to use any API endpoint (except `/oauth2`, `/login`, and `/logout`)
    :param str x_request_id: Supplied by client during request or generated by server.
    :param bool destroyed: If set to `true`, lists only destroyed objects that are in the eradication pending state. If set to `false`, lists only objects that are not destroyed. For destroyed objects, the time remaining is displayed in milliseconds.
    :param str filter: Narrows down the results to only the response objects that satisfy the filter criteria.
    :param int limit: Limits the size of the response to the specified number of objects on each page. To return the total number of resources, set `limit=0`. The total number of resources is returned as a `total_item_count` value. If the page size requested is larger than the system maximum limit, the server returns the maximum limit, disregarding the requested page size.
    :param list[str] names: Performs the operation on the unique name specified. Enter multiple names in comma-separated format. For example, `name01,name02`.
    :param int offset: The starting position based on the results of the query in relation to the full set of response objects returned.
    :param list[str] sort: Returns the response objects in the order specified. Set `sort` to the name in the response by which to sort. Sorting can be performed on any of the names in the response, and the objects can be sorted in ascending or descending order. By default, the response objects are sorted in ascending order. To sort in descending order, append the minus sign (`-`) to the name. A single request can be sorted on multiple objects. For example, you can sort all volumes from largest to smallest volume size, and then sort volumes of the same size in ascending order by volume name. To sort on multiple names, list the names as comma-separated values.
    :param list[str] source_names: Performs the operation on the source name specified. Enter multiple source names in comma-separated format. For example, `name01,name02`.
    :param bool total_item_count: If set to `true`, the `total_item_count` matching the specified query parameters is calculated and returned in the response. If set to `false`, the `total_item_count` is `null` in the response. This may speed up queries where the `total_item_count` is large. If not specified, defaults to `false`.
    :param bool total_only: If set to `true`, returns the aggregate value of all items after filtering. Where it makes more sense, the average value is displayed instead. The values are displayed for each name where meaningful. If `total_only=true`, the `items` list will be empty.
    :param bool async_req: Request runs in separate thread and method returns multiprocessing.pool.ApplyResult.
    :param bool _return_http_data_only: Returns only data field.
    :param bool _preload_content: Response is converted into objects.
    :param int _request_timeout: Total request timeout in seconds.
                                 It can also be a tuple of (connection time, read time) timeouts.
    :return: ProtectionGroupSnapshotTransferGetResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Coerce scalar list-valued arguments into lists so they can be
    # serialized in csv collection format below.
    if names is not None:
        if not isinstance(names, list):
            names = [names]
    if sort is not None:
        if not isinstance(sort, list):
            sort = [sort]
    if source_names is not None:
        if not isinstance(source_names, list):
            source_names = [source_names]
    # Snapshot every non-None local as a candidate request parameter.
    # NOTE: because this reads locals(), no new local variable may be
    # introduced above this line without it leaking into the request.
    params = {k: v for k, v in six.iteritems(locals()) if v is not None}
    # Convert the filter into a string
    if params.get('filter'):
        params['filter'] = str(params['filter'])
    if params.get('sort'):
        params['sort'] = [str(_x) for _x in params['sort']]
    # Client-side range validation before any network traffic is attempted.
    if 'limit' in params and params['limit'] < 1:
        raise ValueError("Invalid value for parameter `limit` when calling `api29_protection_group_snapshots_transfer_get`, must be a value greater than or equal to `1`")
    if 'offset' in params and params['offset'] < 0:
        raise ValueError("Invalid value for parameter `offset` when calling `api29_protection_group_snapshots_transfer_get`, must be a value greater than or equal to `0`")

    collection_formats = {}
    path_params = {}

    query_params = []
    if 'destroyed' in params:
        query_params.append(('destroyed', params['destroyed']))
    if 'filter' in params:
        query_params.append(('filter', params['filter']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'names' in params:
        query_params.append(('names', params['names']))
        collection_formats['names'] = 'csv'
    if 'offset' in params:
        query_params.append(('offset', params['offset']))
    if 'sort' in params:
        query_params.append(('sort', params['sort']))
        collection_formats['sort'] = 'csv'
    if 'source_names' in params:
        query_params.append(('source_names', params['source_names']))
        collection_formats['source_names'] = 'csv'
    if 'total_item_count' in params:
        query_params.append(('total_item_count', params['total_item_count']))
    if 'total_only' in params:
        query_params.append(('total_only', params['total_only']))

    header_params = {}
    if 'authorization' in params:
        header_params['Authorization'] = params['authorization']
    if 'x_request_id' in params:
        header_params['X-Request-ID'] = params['x_request_id']

    form_params = []
    local_var_files = {}

    # GET request: no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(
        ['application/json'])

    # Authentication setting
    auth_settings = []

    return self.api_client.call_api(
        '/api/2.9/protection-group-snapshots/transfer', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ProtectionGroupSnapshotTransferGetResponse',
        auth_settings=auth_settings,
        async_req=async_req,
        _return_http_data_only=_return_http_data_only,
        _preload_content=_preload_content,
        _request_timeout=_request_timeout,
        collection_formats=collection_formats,
    )
| 53.224872 | 671 | 0.662004 |
ace652034c6467170a4b7edb09cbcae3a2c61c61 | 8,228 | py | Python | grr/server/grr_response_server/gui/selenium_tests/acl_manager_test.py | certxlm/grr | c2a442a27f656fb18dfa3bce098847e5c5b849d7 | [
"Apache-2.0"
] | 1 | 2019-08-28T23:48:20.000Z | 2019-08-28T23:48:20.000Z | grr/server/grr_response_server/gui/selenium_tests/acl_manager_test.py | AjitNair2/grr | 2a2ea891b3927775872904cdd402a18e7bb3d143 | [
"Apache-2.0"
] | 2 | 2022-01-15T03:18:12.000Z | 2022-02-13T22:02:43.000Z | grr/server/grr_response_server/gui/selenium_tests/acl_manager_test.py | acidburn0zzz/grr | 44e1a5b1630e8101610faaaebe15b19b5ad30cb1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests the access control authorization workflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from grr_response_core.lib import utils
from grr_response_server.gui import gui_test_lib
from grr.test_lib import test_lib
class TestACLWorkflow(gui_test_lib.GRRSeleniumTest):
  """Tests the access control workflow.

  End-to-end selenium tests: each test drives the GRR web UI through the
  client-approval request/grant flow. Statement order is significant — every
  step depends on the UI state produced by the previous one.
  """

  # Using an Unicode string for the test here would be optimal but Selenium
  # can't correctly enter Unicode text into forms.
  reason = "Felt like it!"

  def setUp(self):
    # Two clients: one left unapproved, one approved in testRecentReasonBox.
    super(TestACLWorkflow, self).setUp()
    self.client_id_1 = self.SetupClient(0)
    self.client_id_2 = self.SetupClient(1)

  def testNavigatorLinksDisabledForClientWithoutApproval(self):
    self.Open("/#/clients/%s?navigator-test" % self.client_id_1)

    # Without an approval, the data-access navigation links must be disabled.
    self.WaitUntil(self.IsElementPresent,
                   "css=a[grrtarget='client.vfs'].disabled")
    self.WaitUntil(self.IsElementPresent,
                   "css=a[grrtarget='client.launchFlows'].disabled")
    self.WaitUntil(self.IsElementPresent,
                   "css=a[grrtarget='client.flows'].disabled")

    # Only the "Host Information" navigation link should be active.
    self.WaitUntil(self.IsElementPresent,
                   "css=a[grrtarget='client.hostInfo']:not(.disabled)")

  def testApprovalNotificationIsShownInHostInfoForUnapprovedClient(self):
    self.Open("/#/clients/%s" % self.client_id_1)

    self.WaitUntil(self.IsTextPresent,
                   "You do not have an approval for this client.")

  def testClickingOnRequestApprovalShowsApprovalDialog(self):
    self.Open("/#/clients/%s" % self.client_id_1)

    self.Click("css=button[name=requestApproval]")

    self.WaitUntil(self.IsElementPresent,
                   "css=h3:contains('Create a new approval')")

  def testClientACLWorkflow(self):
    # Full request -> grant -> second-grant -> access workflow.
    self.Open("/")

    self.Type("client_query", self.client_id_1)
    self.Click("client_query_submit")

    self.WaitUntilEqual(self.client_id_1, self.GetText,
                        "css=span[type=subject]")

    # Choose client 1
    self.Click("css=td:contains('%s')" % self.client_id_1)

    # We do not have an approval, so we need to request one.
    self.WaitUntil(self.IsElementPresent, "css=div.no-approval")
    self.Click("css=button[name=requestApproval]")
    self.WaitUntil(self.IsElementPresent,
                   "css=h3:contains('Create a new approval')")

    # This asks the user "test" (which is us) to approve the request.
    self.Type("css=grr-request-approval-dialog input[name=acl_approver]",
              self.token.username)
    self.Type("css=grr-request-approval-dialog input[name=acl_reason]",
              self.reason)
    self.Click(
        "css=grr-request-approval-dialog button[name=Proceed]:not([disabled])")

    self.WaitForNotification(self.token.username)
    # User test logs in as an approver.
    self.Open("/")
    self.WaitUntil(lambda: self.GetText("notification_button") != "0")

    self.Click("notification_button")
    self.Click("css=td:contains('grant access to GRR client')")
    self.WaitUntilContains("Grant access", self.GetText,
                           "css=h2:contains('Grant')")
    self.WaitUntil(self.IsTextPresent,
                   "The user %s has requested" % self.token.username)

    self.Click("css=button:contains('Approve')")
    self.WaitUntil(self.IsTextPresent, "Approval granted.")

    self.WaitForNotification(self.token.username)
    self.Open("/")

    # We should be notified that we have an approval
    self.WaitUntil(lambda: self.GetText("notification_button") != "0")
    self.Click("notification_button")
    self.Click("css=td:contains('has granted you access')")

    # This is insufficient - we need 2 approvers.
    self.WaitUntil(self.IsTextPresent,
                   "You do not have an approval for this client.")

    # Lets add another approver.
    approval_id = self.ListClientApprovals(requestor=self.token.username)[0].id
    self.GrantClientApproval(
        self.client_id_1,
        approval_id=approval_id,
        requestor=self.token.username,
        approver=u"approver")

    # Check if we see that the approval has already been granted.
    self.Open("/")
    self.Click("notification_button")

    self.Click("css=td:contains('grant access to GRR client')")

    self.WaitUntil(self.IsTextPresent,
                   "This approval has already been granted!")

    # Try again:
    self.Open("/")
    self.Click("notification_button")
    self.Click("css=td:contains('has granted you access')")

    # Host information page should be displayed.
    self.WaitUntil(self.IsTextPresent, "Last booted")
    self.WaitUntil(self.IsTextPresent, "Interfaces")

    # One email for the original request and one for each approval.
    self.assertLen(self.emails_sent, 3)

  def testRecentReasonBox(self):
    self.Open("/")
    test_reason = u"ástæða"
    # Grant an approval for client 2 so its reason shows up later in the
    # "recent reasons" dropdown when requesting access to client 1.
    self.RequestAndGrantClientApproval(self.client_id_2, reason=test_reason)

    self.Type("client_query", self.client_id_2)
    self.Click("client_query_submit")

    self.WaitUntilEqual(self.client_id_2, self.GetText,
                        "css=span[type=subject]")

    # Choose client 6
    self.Click("css=td:contains('%s')" % self.client_id_2)
    self.WaitUntil(self.IsTextPresent, u"Access reason: %s" % test_reason)

    # By now we should have a recent reason set, let's see if it shows up in
    # the ACL dialog.
    self.Type("client_query", self.client_id_1)
    self.Click("client_query_submit")

    self.WaitUntilEqual(self.client_id_1, self.GetText,
                        "css=span[type=subject]")

    # Choose client 1
    self.Click("css=td:contains('%s')" % self.client_id_1)

    # We do not have an approval, so check that the hint is shown, that the
    # interrogate button is disabled and that the menu is disabled.
    self.WaitUntil(self.IsElementPresent, "css=div.no-approval")
    self.WaitUntil(self.IsElementPresent,
                   "css=button:contains('Interrogate')[disabled]")
    self.WaitUntil(self.IsElementPresent, "css=a.nav-link.disabled")

    # Request an approval.
    self.Click("css=button[name=requestApproval]")
    self.WaitUntil(self.IsElementPresent,
                   "css=h3:contains('Create a new approval')")

    self.WaitUntilEqual(
        2, self.GetCssCount, "css=grr-request-approval-dialog "
        "select[name=acl_recent_reasons] option")
    self.assertEqual(
        "Enter New Reason...",
        self.GetText("css=grr-request-approval-dialog "
                     "select[name=acl_recent_reasons] option:nth(0)"))
    self.assertEqual(
        test_reason,
        self.GetText("css=grr-request-approval-dialog "
                     "select[name=acl_recent_reasons] option:nth(1)"))

    # The reason text box should be there and enabled.
    element = self.GetElement(
        "css=grr-request-approval-dialog input[name=acl_reason]")
    self.assertTrue(element.is_enabled())

    self.Select(
        "css=grr-request-approval-dialog select[name=acl_recent_reasons]",
        test_reason)

    # Make sure clicking the recent reason greys out the reason text box.
    element = self.GetElement(
        "css=grr-request-approval-dialog input[name=acl_reason]")
    self.assertFalse(element.is_enabled())

    # Ok now submit this.
    self.Type("css=grr-request-approval-dialog input[name=acl_approver]",
              self.token.username)
    self.Click(
        "css=grr-request-approval-dialog button[name=Proceed]:not([disabled])")

    # "Request Approval" dialog should go away.
    self.WaitUntilNot(self.IsVisible, "css=.modal-open")

    # And make sure the approval was created...
    def GetApprovals():
      # Filter down to approvals for client 1 only.
      approvals = self.ListClientApprovals(requestor=self.token.username)
      return list(
          a for a in approvals if a.subject.client_id == self.client_id_1)

    self.WaitUntilEqual(1, lambda: len(GetApprovals()))

    # ... using the correct reason.
    approvals = GetApprovals()
    self.assertEqual(utils.SmartUnicode(approvals[0].reason), test_reason)
# Standard GRR test entry point: defers argument parsing and test discovery
# to test_lib.main via absl.
if __name__ == "__main__":
  app.run(test_lib.main)
| 34.864407 | 80 | 0.679387 |
ace6521ebb00c9299e4358d50e5ba779e4a94a14 | 3,342 | py | Python | neighbourhood/models.py | samwel-chege/Neighborhood | bc7327d9084d1af1296416633a8152fddb9b4182 | [
"MIT"
] | null | null | null | neighbourhood/models.py | samwel-chege/Neighborhood | bc7327d9084d1af1296416633a8152fddb9b4182 | [
"MIT"
] | null | null | null | neighbourhood/models.py | samwel-chege/Neighborhood | bc7327d9084d1af1296416633a8152fddb9b4182 | [
"MIT"
] | null | null | null | import neighbourhood
from django.db import models
from django.contrib.auth.models import User
from django.db.models.deletion import CASCADE
from django.dispatch import receiver
from django.db.models.signals import post_save
# Create your models here.
class NewsLetterRecipients(models.Model):
    """A newsletter subscriber: display name plus email address."""
    name = models.CharField(max_length=100)
    email = models.EmailField()
class Profile(models.Model):
    """One-to-one extension of the auth User with a display name and photo."""
    user = models.OneToOneField(User,on_delete=models.CASCADE)
    name = models.CharField(max_length=100,default="Names")
    profile_photo = models.ImageField(upload_to='photos')

    # post_save receivers are defined inside the class body but are plain
    # functions (no self); the @receiver decorator registers them with the
    # signal framework when this module is imported.
    @receiver(post_save, sender=User)
    def create_user_profile(sender, instance, created, **kwargs):
        # Auto-create a Profile the first time a User is saved.
        if created:
            Profile.objects.create(user=instance)

    @receiver(post_save, sender=User)
    def save_user_profile(sender, instance, **kwargs):
        # Keep the related Profile persisted whenever the User is saved.
        instance.profile.save()

    def __str__(self):
        return self.user.username
class Neighborhood(models.Model):
    """A neighborhood owned by a Profile, with a resident head-count."""
    name = models.CharField(max_length=100)
    location = models.CharField(max_length=100)
    residents = models.IntegerField(default=1)
    profile = models.ForeignKey(Profile,on_delete=models.CASCADE)

    def save_neighborhood(self):
        # Thin wrapper over Model.save().
        self.save()

    def delete_neighborhood(self):
        # Thin wrapper over Model.delete().
        self.delete()

    @classmethod
    def search_neighbors(cls,search_term):
        # Case-insensitive substring match on the neighborhood name.
        return cls.objects.filter(name__icontains = search_term)

    @classmethod
    def update_neighbors(cls,id,resident):
        # NOTE(review): despite its name this FILTERS (returns a queryset of
        # rows already matching id AND residents) rather than updating the
        # residents count; an update would be
        # cls.objects.filter(id=id).update(residents=resident).
        # Confirm intent with callers before changing.
        return cls.objects.filter(id=id,residents=resident)

    def __str__(self):
        return self.name
class Resident(models.Model):
    """A user living in a Neighborhood, with a display name and photo."""
    user = models.OneToOneField(User,on_delete=models.CASCADE)
    name = models.CharField(max_length=100)
    profile_photo = models.ImageField(upload_to='photos')
    neighbourhood = models.ForeignKey(Neighborhood,on_delete=models.CASCADE)

    def save_residents(self):
        # Thin wrapper over Model.save().
        self.save()

    def delete_residents(self):
        # Thin wrapper over Model.delete().
        self.delete()

    @classmethod
    def search_resident(cls,search_term):
        # Case-insensitive substring match on the linked User's username.
        return cls.objects.filter(user__username__icontains =search_term)

    def __str__(self):
        return self.user.username
class Business(models.Model):
    """A business operating in a Neighborhood, owned by a User."""
    name = models.CharField(max_length=100)
    photo = models.ImageField(upload_to='photos',default='no photo')
    # Nullable: a business may exist without a linked owner account.
    user = models.OneToOneField(User,on_delete=models.CASCADE,null=True)
    neighborhood = models.ForeignKey(Neighborhood,on_delete=models.CASCADE)
    email_address = models.EmailField(max_length=100)

    def save_business(self):
        # Thin wrapper over Model.save().
        self.save()

    def delete_business(self):
        # Thin wrapper over Model.delete().
        self.delete()

    @classmethod
    def search_business(cls,search_term):
        # Case-insensitive substring match on the business name.
        return cls.objects.filter(name__icontains = search_term)

    def __str__(self):
        return self.name
class Post(models.Model):
    """A user's post within a Neighborhood (title, body, optional photo)."""
    user = models.OneToOneField(User,on_delete=models.CASCADE)
    title = models.CharField(max_length=100)
    post = models.TextField()
    photo = models.ImageField(upload_to='photos',default='no photo')
    neighbourhood = models.ForeignKey(Neighborhood,on_delete=models.CASCADE)
    # Timestamp set once when the row is first created.
    post_date = models.DateTimeField(auto_now_add=True)

    def save_post(self):
        # Thin wrapper over Model.save().
        self.save()

    def delete_post(self):
        # Thin wrapper over Model.delete().
        self.delete()

    def __str__(self):
        return self.title
| 29.575221 | 81 | 0.709755 |
ace653087713bf29768f9766cbffc690a8786cd6 | 1,768 | py | Python | aliyun-python-sdk-codeup/aliyunsdkcodeup/request/v20200414/DeleteRepositoryRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-codeup/aliyunsdkcodeup/request/v20200414/DeleteRepositoryRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-codeup/aliyunsdkcodeup/request/v20200414/DeleteRepositoryRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class DeleteRepositoryRequest(RoaRequest):
	"""ROA-style request for the Codeup `DeleteRepository` API (2020-04-14).

	Deletes the repository identified by the `ProjectId` path parameter via
	POST /api/v3/projects/[ProjectId]/remove. The remaining parameters are
	passed as query parameters per the generated SDK convention.
	"""

	def __init__(self):
		# Product 'codeup', API version '2020-04-14', action 'DeleteRepository'.
		RoaRequest.__init__(self, 'codeup', '2020-04-14', 'DeleteRepository')
		self.set_uri_pattern('/api/v3/projects/[ProjectId]/remove')
		self.set_method('POST')

	def get_OrganizationId(self):
		return self.get_query_params().get('OrganizationId')

	def set_OrganizationId(self,OrganizationId):
		self.add_query_param('OrganizationId',OrganizationId)

	def get_SubUserId(self):
		return self.get_query_params().get('SubUserId')

	def set_SubUserId(self,SubUserId):
		self.add_query_param('SubUserId',SubUserId)

	def get_AccessToken(self):
		return self.get_query_params().get('AccessToken')

	def set_AccessToken(self,AccessToken):
		self.add_query_param('AccessToken',AccessToken)

	def get_ProjectId(self):
		# ProjectId is part of the URI pattern, hence a path (not query) param.
		return self.get_path_params().get('ProjectId')

	def set_ProjectId(self,ProjectId):
		self.add_path_param('ProjectId',ProjectId)
ace653660893c1ef44110200bbe72d215cb687c8 | 20,437 | py | Python | zetasql/parser/gen_parse_tree.py | aceforeverd/zetasql | 42cab5f4f7c3ed3ad823ac3929a83fbcd36948ae | [
"Apache-2.0"
] | 2 | 2021-04-09T15:40:37.000Z | 2022-02-17T09:38:06.000Z | zetasql/parser/gen_parse_tree.py | aceforeverd/zetasql | 42cab5f4f7c3ed3ad823ac3929a83fbcd36948ae | [
"Apache-2.0"
] | null | null | null | zetasql/parser/gen_parse_tree.py | aceforeverd/zetasql | 42cab5f4f7c3ed3ad823ac3929a83fbcd36948ae | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Defines parse tree nodes for the ZetaSQL parser.
This program defines parse tree node subclasses of ASTNode. It generates
headers and other files from templates.
Still a work in progress.
"""
import enum
import re
from absl import app
from absl import flags
import jinja2
from zetasql.parser.generator_utils import CleanComment
from zetasql.parser.generator_utils import ScalarType
from zetasql.parser.generator_utils import Trim
# Matches each lowercase-to-uppercase transition, i.e. every camel-case word
# boundary that needs an underscore inserted.
_make_enum_name_re = re.compile(r'([a-z])([A-Z])')


def NameToEnumName(name):
  """Convert a camel-case c++ ASTClassName into AST_CLASS_NAME."""
  # Fold the leading 'AST' acronym into a single camel-case word ('Ast') so
  # the boundary regex treats it as one token, then underscore-separate the
  # word boundaries and uppercase the result.
  camel = name.replace('AST', 'Ast')
  underscored = _make_enum_name_re.sub(r'\1_\2', camel)
  return underscored.upper()
# Predefined scalar (non-AST-node) field types for use with Field() below.
SCALAR_BOOL = ScalarType(
    'bool',
    cpp_default='false')

SCALAR_STRING = ScalarType(
    'std::string')

SCALAR_ID_STRING = ScalarType(
    'IdString')
# Identifies the FieldLoader method used to populate member fields.
# Each node field in a subclass is added to the children_ vector in ASTNode,
# then additionally added to a type-specific field in the subclass using one
# of these methods:
# REQUIRED: The next node in the vector, which must exist, is used for this
# field.
# OPTIONAL: The next node in the vector, if it exists, is used for this field.
# REST_AS_REPEATED: All remaining nodes, if any, are used for this field,
# which should be a vector type.
# See Add* methods in ast_node.h for further details.
class FieldLoaderMethod(enum.Enum):
  """Identifies the FieldLoader method used to populate a member field.

  REQUIRED: the next node in the children_ vector, which must exist.
  OPTIONAL: the next node in the vector, if it exists.
  REST_AS_REPEATED: all remaining nodes go into a vector-typed field.
  """
  REQUIRED = 0
  OPTIONAL = 1
  REST_AS_REPEATED = 2
def Field(name,
          ctype,
          field_loader=FieldLoaderMethod.OPTIONAL,
          comment=None,
          gen_setters_and_getters=True):
  """Make a field to put in a node class.

  Args:
    name: field name (without the trailing underscore).
    ctype: c++ type for this field. Either a ScalarType (int, string or enum
      type) or the name of a node class type (e.g. ASTExpression). Must not
      be a pointer type and must not include modifiers like const.
    field_loader: FieldLoaderMethod enum specifying which FieldLoader method
      populates this field; REST_AS_REPEATED makes the field a vector.
    comment: comment text for this field; stripped and de-indented.
    gen_setters_and_getters: when False, suppress generation of the default
      template-based get/set methods (alternatives may be supplied via
      extra_defs on the node).

  Returns:
    The newly created field, as a dict consumed by the code templates.
  """
  is_vector = field_loader == FieldLoaderMethod.REST_AS_REPEATED
  member_name = name + '_'

  if isinstance(ctype, ScalarType):
    # Scalar fields (bool, string, enum) are stored by value.
    member_type = ctype.ctype
    cpp_default = ctype.cpp_default
    is_node_ptr = False
    enum_name = None
    element_storage_type = None
  elif is_vector:
    # Repeated node fields are stored as a span over const node pointers.
    element_storage_type = 'const %s*' % ctype
    member_type = 'absl::Span<%s const>' % element_storage_type
    cpp_default = ''
    is_node_ptr = False
    enum_name = None
  else:
    # Singular node fields are stored as one const node pointer.
    element_storage_type = 'const %s*' % ctype
    member_type = 'const %s*' % ctype
    cpp_default = 'nullptr'
    is_node_ptr = True
    enum_name = NameToEnumName(ctype)

  return {
      'ctype': ctype,
      'cpp_default': cpp_default,
      'member_name': member_name,  # member variable name
      'name': name,  # name without trailing underscore
      'comment': CleanComment(comment, prefix=' // '),
      'member_type': member_type,
      'is_node_ptr': is_node_ptr,
      'field_loader': field_loader.name,
      'enum_name': enum_name,
      'is_vector': is_vector,
      'element_storage_type': element_storage_type,
      'gen_setters_and_getters': gen_setters_and_getters,
  }
class TreeGenerator(object):
  """Generates code to define tree objects.

  Nodes are registered with AddNode() and then materialized into a header
  file with Generate().
  """

  def __init__(self):
    # List of node dicts, in registration order; consumed by the template.
    self.nodes = []

  def AddNode(self,
              name,
              parent,
              is_abstract=False,
              fields=None,
              extra_defs='',
              comment=None,
              use_custom_debug_string=False,
              force_gen_init_fields=False):
    """Add a node class to be generated.

    Args:
      name: class name for this node
      parent: class name of the parent node
      is_abstract: true if this node is an abstract class
      fields: list of fields in this class; created with Field function
      extra_defs: extra c++ definitions to put in this class.
      comment: Comment text for this node. Text will be stripped and
          de-indented.
      use_custom_debug_string: If True, generate prototype for overridden
          SingleNodeDebugString method.
      force_gen_init_fields: If True, generate the InitFields method even when
          there are no fields to be added, so as to ensure there are no
          children.
    """
    if fields is None:
      fields = []
    # Concrete classes are marked 'final' in the generated c++.
    class_final = '' if not is_abstract else ''
    if is_abstract:
      class_final = ''
    else:
      class_final = 'final '
    enum_name = NameToEnumName(name)
    # Generate InitFields if there is at least one is_node_ptr or is_vector
    # field, or if force_gen_init_fields was requested.
    gen_init_fields = force_gen_init_fields or any(
        field['is_node_ptr'] or field['is_vector'] for field in fields)
    node_dict = {
        'name': name,
        'parent': parent,
        'class_final': class_final,
        'is_abstract': is_abstract,
        'comment': CleanComment(comment, prefix='// '),
        'fields': fields,
        'enum_name': enum_name,
        'extra_defs': extra_defs.rstrip(),
        'use_custom_debug_string': use_custom_debug_string,
        'gen_init_fields': gen_init_fields}
    self.nodes.append(node_dict)

  def Generate(
      self,
      output_path,
      h_template_path=None):
    """Materialize the template to generate the output file."""
    jinja_env = jinja2.Environment(
        undefined=jinja2.StrictUndefined,
        autoescape=False,
        trim_blocks=True,
        lstrip_blocks=True,
        line_statement_prefix='# ',
        loader=jinja2.FileSystemLoader('', followlinks=True))

    context = {
        'nodes': self.nodes,
        # For when we need to force a blank line and jinja wants to
        # eat blank lines from the template.
        'blank_line': '\n'
    }

    h_template = jinja_env.get_template(h_template_path)
    # Use a context manager so the file handle is closed even if rendering
    # raises (the original open()/close() pair leaked it on error).
    with open(output_path, 'wt') as out:
      out.write(Trim(h_template.render(context)))
def main(argv):
  """Register all parse tree nodes and generate the output header.

  Args:
    argv: command-line arguments; argv[1] is the path of the generated
        header, argv[2] is the path of its jinja2 template.

  Raises:
    Exception: if the wrong number of arguments is supplied.
  """
  if len(argv) != 3:
    # Bug fix: the original message contained a literal '%s' that was never
    # substituted; format in the program name so usage renders correctly.
    raise Exception(
        'Usage: %s <output/path/to/parse_tree_generated.h> '
        '<input/path/to/parse_tree_generated.h.template>' % argv[0])

  output_path = argv[1]
  h_template_path = argv[2]

  gen = TreeGenerator()

  gen.AddNode(
      name='ASTStatement',
      parent='ASTNode',
      is_abstract=True,
      comment="""
    Superclass of all Statements.
      """,
      extra_defs="""
  bool IsStatement() const final { return true; }
  bool IsSqlStatement() const override { return true; }
      """
      )

  gen.AddNode(
      name='ASTQueryExpression',
      parent='ASTNode',
      is_abstract=True,
      comment="""
    Superclass for all query expressions.  These are top-level syntactic
    constructs (outside individual SELECTs) making up a query.  These include
    Query itself, Select, UnionAll, etc.
      """,
      extra_defs="""
  bool IsQueryExpression() const override { return true; }
      """,
      fields=[
          Field(
              'parenthesized',
              SCALAR_BOOL,
              field_loader=FieldLoaderMethod.REQUIRED)
      ])

  gen.AddNode(
      name='ASTQuery',
      parent='ASTQueryExpression',
      fields=[
          Field(
              'with_clause',
              'ASTWithClause',
              comment="""
      If present, the WITH clause wrapping this query.
            """),
          Field(
              'query_expr',
              'ASTQueryExpression',
              field_loader=FieldLoaderMethod.REQUIRED,
              comment="""
      The query_expr can be a single Select, or a more complex structure
      composed out of nodes like SetOperation and Query.
            """),
          Field(
              'order_by',
              'ASTOrderBy',
              comment="""
      If present, applies to the result of <query_expr_> as appropriate.
            """),
          Field(
              'limit_offset',
              'ASTLimitOffset',
              comment="""
      If present, this applies after the result of <query_expr_> and
      <order_by_>.
            """),
          Field('is_nested', SCALAR_BOOL),
          Field(
              'is_pivot_input',
              SCALAR_BOOL,
              comment="""
      True if this query represents the input to a pivot clause.
            """)
      ],
      use_custom_debug_string=True
      )

  gen.AddNode(
      name='ASTExpression',
      parent='ASTNode',
      is_abstract=True,
      extra_defs="""
  bool IsExpression() const override { return true; }

  // Returns true if this expression is allowed to occur as a child of a
  // comparison expression. This is not allowed for unparenthesized comparison
  // expressions and operators with a lower precedence level (AND, OR, and NOT).
  virtual bool IsAllowedInComparison() const { return true; }
      """,
      fields=[
          Field(
              'parenthesized',
              SCALAR_BOOL,
              field_loader=FieldLoaderMethod.REQUIRED)
      ])

  gen.AddNode(
      name='ASTQueryStatement',
      parent='ASTStatement',
      comment="""
    Represents a single query statement.
      """,
      fields=[
          Field(
              'query',
              'ASTQuery',
              field_loader=FieldLoaderMethod.REQUIRED),
      ])

  gen.AddNode(
      name='ASTSelect',
      parent='ASTQueryExpression',
      use_custom_debug_string=True,
      fields=[
          Field(
              'hint',
              'ASTHint'),
          Field(
              'anonymization_options',
              'ASTOptionsList'),
          Field(
              'distinct',
              SCALAR_BOOL),
          Field(
              'select_as',
              'ASTSelectAs'),
          Field(
              'select_list',
              'ASTSelectList',
              field_loader=FieldLoaderMethod.REQUIRED),
          Field(
              'from_clause',
              'ASTFromClause'),
          Field(
              'where_clause',
              'ASTWhereClause'),
          Field(
              'group_by',
              'ASTGroupBy'),
          Field(
              'having',
              'ASTHaving'),
          Field(
              'qualify',
              'ASTQualify'),
          Field(
              'window_clause',
              'ASTWindowClause'),
      ])

  gen.AddNode(
      name='ASTSelectList',
      parent='ASTNode',
      fields=[
          Field(
              'columns',
              'ASTSelectColumn',
              field_loader=FieldLoaderMethod.REST_AS_REPEATED),
      ])

  gen.AddNode(
      name='ASTSelectColumn',
      parent='ASTNode',
      fields=[
          Field(
              'expression',
              'ASTExpression',
              field_loader=FieldLoaderMethod.REQUIRED),
          Field(
              'alias',
              'ASTAlias')
      ])

  gen.AddNode(
      name='ASTLeaf',
      parent='ASTExpression',
      is_abstract=True,
      use_custom_debug_string=True,
      extra_defs="""
  // image() references data with the same lifetime as this ASTLeaf object.
  absl::string_view image() const { return image_; }
  void set_image(std::string image) { image_ = std::move(image); }

  bool IsLeaf() const override { return true; }
      """,
      # Triggers check that there were no children.
      force_gen_init_fields=True,
      fields=[
          Field(
              'image',
              SCALAR_STRING,
              gen_setters_and_getters=False)
      ])

  gen.AddNode(
      name='ASTIntLiteral',
      parent='ASTLeaf',
      extra_defs="""

  bool is_hex() const;
      """,
      )

  gen.AddNode(
      name='ASTIdentifier',
      parent='ASTExpression',
      use_custom_debug_string=True,
      extra_defs="""
  // Set the identifier string.  Input <identifier> is the unquoted identifier.
  // There is no validity checking here.  This assumes the identifier was
  // validated and unquoted in zetasql.jjt.
  void SetIdentifier(IdString identifier) {
    id_string_ = identifier;
  }

  // Get the unquoted and unescaped string value of this identifier.
  IdString GetAsIdString() const { return id_string_; }
  std::string GetAsString() const { return id_string_.ToString(); }
  absl::string_view GetAsStringView() const {
    return id_string_.ToStringView();
  }
      """,
      # Triggers check that there were no children.
      force_gen_init_fields=True,
      fields=[
          Field(
              'id_string',
              SCALAR_ID_STRING,
              gen_setters_and_getters=False)
      ])

  gen.AddNode(
      name='ASTAlias',
      parent='ASTNode',
      fields=[
          Field(
              'identifier',
              'ASTIdentifier',
              field_loader=FieldLoaderMethod.REQUIRED),
      ],
      extra_defs="""
  // Get the unquoted and unescaped string value of this alias.
  std::string GetAsString() const;
  absl::string_view GetAsStringView() const;
  IdString GetAsIdString() const;
      """
      )

  gen.AddNode(
      name='ASTGeneralizedPathExpression',
      parent='ASTExpression',
      is_abstract=True,
      comment="""
    Parent class that corresponds to the subset of ASTExpression nodes that are
    allowed by the <generalized_path_expression> grammar rule. It allows for some
    extra type safety vs. simply passing around ASTExpression as
    <generalized_path_expression>s.

    Only the following node kinds are allowed:
    - AST_PATH_EXPRESSION
    - AST_DOT_GENERALIZED_FIELD where the left hand side is a
      <generalized_path_expression>.
    - AST_DOT_IDENTIFIER where the left hand side is a
      <generalized_path_expression>.
    - AST_ARRAY_ELEMENT where the left hand side is a
      <generalized_path_expression>

    Note that the type system does not capture the "pureness constraint" that,
    e.g., the left hand side of an AST_DOT_GENERALIZED_FIELD must be a
    <generalized_path_expression> in order for the node. However, it is still
    considered a bug to create a variable with type ASTGeneralizedPathExpression
    that does not satisfy the pureness constraint (similarly, it is considered a
    bug to call a function with an ASTGeneralizedPathExpression argument that
    does not satisfy the pureness constraint).
      """,
      extra_defs="""
  // Returns an error if 'path' contains a node that cannot come from the
  // <generalized_path_expression> grammar rule.
  static absl::Status VerifyIsPureGeneralizedPathExpression(
      const ASTExpression* path);
      """)

  gen.AddNode(
      name='ASTPathExpression',
      parent='ASTGeneralizedPathExpression',
      comment="""
    This is used for dotted identifier paths only, not dotting into
    arbitrary expressions (see ASTDotIdentifier below).
      """,
      fields=[
          Field(
              'names',
              'ASTIdentifier',
              field_loader=FieldLoaderMethod.REST_AS_REPEATED,
              gen_setters_and_getters=False),
      ],
      # The existing API unfortunately uses name(int i) rather than names(int i)
      extra_defs="""
  const int num_names() const { return names_.size(); }
  const absl::Span<const ASTIdentifier* const>& names() const {
    return names_;
  }
  const ASTIdentifier* name(int i) const { return names_[i]; }
  const ASTIdentifier* first_name() const { return names_.front(); }
  const ASTIdentifier* last_name() const { return names_.back(); }

  // Return this PathExpression as a dotted SQL identifier string, with
  // quoting if necessary.  If <max_prefix_size> is non-zero, include at most
  // that many identifiers from the prefix of <path>.
  std::string ToIdentifierPathString(size_t max_prefix_size = 0) const;

  // Return the vector of identifier strings (without quoting).
  std::vector<std::string> ToIdentifierVector() const;

  // Similar to ToIdentifierVector(), but returns a vector of IdString's,
  // avoiding the need to make copies.
  std::vector<IdString> ToIdStringVector() const;
      """
      )

  gen.AddNode(
      name='ASTTableExpression',
      parent='ASTNode',
      is_abstract=True,
      comment="""
    Superclass for all table expressions.  These are things that appear in the
    from clause and produce a stream of rows like a table.
    This includes table scans, joins and subqueries.
      """,
      extra_defs="""
  bool IsTableExpression() const override { return true; }

  // Return the alias, if the particular subclass has one.
  virtual const ASTAlias* alias() const { return nullptr; }

  // Return the ASTNode location of the alias for this table expression,
  // if applicable.
  const ASTNode* alias_location() const;
      """
      )

  gen.AddNode(
      name='ASTTablePathExpression',
      parent='ASTTableExpression',
      comment="""
    TablePathExpression are the TableExpressions that introduce a single scan,
    referenced by a path expression or UNNEST, and can optionally have
    aliases, hints, and WITH OFFSET.
      """,
      fields=[
          Field(
              'path_expr',
              'ASTPathExpression',
              comment="""
      One of path_exp or path_exp must be non-NULL but not both.
            """),
          Field(
              'unnest_expr',
              'ASTUnnestExpression'),
          Field(
              'hint',
              'ASTHint'),
          Field(
              'alias',
              'ASTAlias',
              # Existing API getter specifies "override"
              gen_setters_and_getters=False),
          Field(
              'with_offset',
              'ASTWithOffset',
              comment="""
      Present if the scan had WITH OFFSET.
            """),
          Field(
              'pivot_clause',
              'ASTPivotClause',
              comment="""
      One of pivot_clause or unpivot_clause can be present but not both.
            """),
          Field(
              'unpivot_clause',
              'ASTUnpivotClause'),
          Field(
              'for_system_time',
              'ASTForSystemTime'),
          Field(
              'sample_clause',
              'ASTSampleClause'),
      ],
      extra_defs="""
  const ASTAlias* alias() const override { return alias_; }
      """
      )

  gen.AddNode(
      name='ASTFromClause',
      parent='ASTNode',
      fields=[
          Field(
              'table_expression',
              'ASTTableExpression',
              field_loader=FieldLoaderMethod.REQUIRED,
              comment="""
      A FromClause has exactly one TableExpression child.
      If the FROM clause has commas, they will be expressed as a tree
      of ASTJoin nodes with join_type=COMMA.
            """),
      ],
      )

  gen.AddNode(
      name='ASTWhereClause',
      parent='ASTNode',
      fields=[
          Field(
              'expression',
              'ASTExpression',
              field_loader=FieldLoaderMethod.REQUIRED),
      ],
      )

  gen.AddNode(
      name='ASTBooleanLiteral',
      parent='ASTLeaf',
      fields=[
          Field(
              'value',
              SCALAR_BOOL),
      ],
      )

  gen.AddNode(
      name='ASTAndExpr',
      parent='ASTExpression',
      fields=[
          Field(
              'conjuncts',
              'ASTExpression',
              field_loader=FieldLoaderMethod.REST_AS_REPEATED),
      ],
      extra_defs="""
  bool IsAllowedInComparison() const override { return parenthesized(); }
      """
      )

  gen.Generate(
      output_path,
      h_template_path=h_template_path)
# absl's app.run parses flags and then invokes main(argv).
if __name__ == '__main__':
  app.run(main)
| 29.966276 | 107 | 0.614816 |
ace6540fe71b0c6061c427aec6f10adbea606dfd | 4,321 | py | Python | homeassistant/components/hitron_coda/device_tracker.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 6 | 2020-07-18T16:33:25.000Z | 2021-09-26T09:52:04.000Z | homeassistant/components/hitron_coda/device_tracker.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 58 | 2020-08-03T07:33:02.000Z | 2022-03-31T06:02:05.000Z | homeassistant/components/hitron_coda/device_tracker.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 14 | 2018-08-19T16:28:26.000Z | 2021-09-02T18:26:53.000Z | """Support for the Hitron CODA-4582U, provided by Rogers."""
from collections import namedtuple
import logging
import requests
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_TYPE,
CONF_USERNAME,
HTTP_OK,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Firmware variant of the CODA router; selects the login form field name
# ("rogers" or "shaw").
DEFAULT_TYPE = "rogers"

# Extend the base device_tracker schema with the router's connection details.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_TYPE, default=DEFAULT_TYPE): cv.string,
    }
)
def get_scanner(_hass, config):
    """Validate the configuration and return a Hitron CODA-4582U scanner."""
    scanner = HitronCODADeviceScanner(config[DOMAIN])
    # A scanner whose first poll failed is reported as unavailable.
    return scanner if scanner.success_init else None


# Minimal record for one connected device: MAC address and host name.
Device = namedtuple("Device", ["mac", "name"])
class HitronCODADeviceScanner(DeviceScanner):
    """This class scans for devices using the CODA's web interface."""

    def __init__(self, config):
        """Initialize the scanner."""
        self.last_results = []  # most recent list of Device tuples
        host = config[CONF_HOST]
        self._url = f"http://{host}/data/getConnectInfo.asp"
        self._loginurl = f"http://{host}/goform/login"

        self._username = config.get(CONF_USERNAME)
        self._password = config.get(CONF_PASSWORD)

        # Shaw-branded firmware posts the password as "pwd"; Rogers as "pws".
        if config.get(CONF_TYPE) == "shaw":
            self._type = "pwd"
        else:
            self._type = "pws"

        self._userid = None  # session cookie obtained by _login()

        # First poll doubles as a connectivity/credentials check.
        self.success_init = self._update_info()
        _LOGGER.info("Scanner initialized")

    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()

        return [device.mac for device in self.last_results]

    def get_device_name(self, device):
        """Return the name of the device with the given MAC address."""
        name = next(
            (result.name for result in self.last_results if result.mac == device), None
        )
        return name

    def _login(self):
        """Log in to the router. This is required for subsequent api calls."""
        _LOGGER.info("Logging in to CODA...")

        try:
            data = [("user", self._username), (self._type, self._password)]
            res = requests.post(self._loginurl, data=data, timeout=10)
        except requests.exceptions.Timeout:
            _LOGGER.error("Connection to the router timed out at URL %s", self._url)
            return False
        if res.status_code != HTTP_OK:
            _LOGGER.error("Connection failed with http code %s", res.status_code)
            return False
        try:
            # The router identifies the session via the "userid" cookie.
            self._userid = res.cookies["userid"]
            return True
        except KeyError:
            _LOGGER.error("Failed to log in to router")
            return False

    def _update_info(self):
        """Get ARP from router."""
        _LOGGER.info("Fetching...")

        # Lazily (re)authenticate before the first request.
        if self._userid is None:
            if not self._login():
                _LOGGER.error("Could not obtain a user ID from the router")
                return False
        last_results = []

        # doing a request
        try:
            res = requests.get(self._url, timeout=10, cookies={"userid": self._userid})
        except requests.exceptions.Timeout:
            _LOGGER.error("Connection to the router timed out at URL %s", self._url)
            return False
        if res.status_code != HTTP_OK:
            _LOGGER.error("Connection failed with http code %s", res.status_code)
            return False
        try:
            result = res.json()
        except ValueError:
            # If json decoder could not parse the response
            _LOGGER.error("Failed to parse response from router")
            return False

        # parsing response
        for info in result:
            mac = info["macAddr"]
            name = info["hostName"]
            # No address = no item :)
            if mac is None:
                continue

            last_results.append(Device(mac.upper(), name))

        self.last_results = last_results

        _LOGGER.info("Request successful")
        return True
| 30.429577 | 87 | 0.614904 |
ace654fbfb909161b50e7536104719409219a989 | 1,172 | py | Python | openmdao/test_suite/components/array_comp.py | toddrme2178/OpenMDAO | 379cc6216d13d380e11cb3a46f03960981de4660 | [
"Apache-2.0"
] | null | null | null | openmdao/test_suite/components/array_comp.py | toddrme2178/OpenMDAO | 379cc6216d13d380e11cb3a46f03960981de4660 | [
"Apache-2.0"
] | 1 | 2015-08-12T17:58:18.000Z | 2015-08-12T17:58:18.000Z | openmdao/test_suite/components/array_comp.py | toddrme2178/OpenMDAO | 379cc6216d13d380e11cb3a46f03960981de4660 | [
"Apache-2.0"
] | 1 | 2021-01-17T14:03:48.000Z | 2021-01-17T14:03:48.000Z | import numpy as np
import openmdao.api as om
class ArrayComp(om.ExplicitComponent):
    """Component with a linear, constant-Jacobian output:
    y1 = J1.x1 + J2.x2 + Jb.bb.
    """

    def setup(self):
        # Fixed 4x4 Jacobian for x1; the other Jacobians are derived from it.
        J1 = np.array([[1.0, 3.0, -2.0, 7.0],
                       [6.0, 2.5, 2.0, 4.0],
                       [-1.0, 0.0, 8.0, 1.0],
                       [1.0, 4.0, -5.0, 6.0]])

        self.J1 = J1
        self.J2 = J1 * 3.3
        self.Jb = J1.T

        # Inputs
        self.add_input('x1', np.zeros([4]))
        self.add_input('x2', np.zeros([4]))
        self.add_input('bb', np.zeros([4]))

        # Outputs
        self.add_output('y1', np.zeros([4]))

        self.declare_partials(of='*', wrt='*')
        # Check the x* partials along a single random direction only.
        self.set_check_partial_options('x*', directional=True)

        # Number of compute() invocations, exposed for tests.
        self.exec_count = 0

    def compute(self, inputs, outputs):
        """
        Execution.
        """
        outputs['y1'] = self.J1.dot(inputs['x1']) + self.J2.dot(inputs['x2']) + self.Jb.dot(inputs['bb'])
        self.exec_count += 1

    def compute_partials(self, inputs, partials):
        """
        Analytical derivatives.
        """
        partials[('y1', 'x1')] = self.J1
        partials[('y1', 'x2')] = self.J2
        partials[('y1', 'bb')] = self.Jb
ace65522fcfb03690930c2f7987358fa6db76346 | 1,619 | py | Python | python-framework/libs/loader.py | huangxingx/python-framework | a62618b0ee5ecff9de426327892cdd690d10510d | [
"MIT"
] | 7 | 2019-10-24T03:26:22.000Z | 2019-10-27T14:55:07.000Z | python-framework/libs/loader.py | PJoemu/python-framework | a62618b0ee5ecff9de426327892cdd690d10510d | [
"MIT"
] | 3 | 2021-06-08T19:13:10.000Z | 2022-01-13T00:38:48.000Z | python-framework/libs/loader.py | PJoemu/python-framework | a62618b0ee5ecff9de426327892cdd690d10510d | [
"MIT"
] | 2 | 2019-10-25T03:54:51.000Z | 2020-06-28T08:50:12.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import redis
# @author: x.huang
# @date:17-8-11
import tornadoredis
from pony.orm import Database, sql_debug
import setting
class DBConnection(object):
    # Shared Pony ORM Database instance; (re)bound on every instantiation.
    db = None

    def __init__(self):
        DBConnection.db = Database(provider='mysql', host=setting.sql_host, user=setting.sql_user,
                                   passwd=setting.sql_password,
                                   db=setting.sql_db)

    @classmethod
    def load_db(cls):
        """Enable SQL debugging per settings, map entities and return the db."""
        sql_debug(setting.SQL_DEBUG)
        cls.db.generate_mapping(create_tables=True)
        return cls.db
# Connection parameters shared by both cache backends.
cache_para_dict = dict(host=setting.cache_host, port=setting.cache_port)


class CacheConnection(object):
    """Facade returning a cache client for the requested driver."""

    @staticmethod
    def load_cache(driver_type=None):
        """Return a cache client.

        'tornadoredis' selects the asynchronous tornado-redis client;
        any other value (including None) falls back to plain redis.
        """
        if driver_type == 'tornadoredis':
            return TornadoRedisConnection.load_cache()
        return RedisConnection.load_cache()
class TornadoRedisConnection(object):
    # Shared pool; wait_for_available blocks instead of raising when exhausted.
    CONNECTION_POOL = tornadoredis.ConnectionPool(max_connections=500, wait_for_available=True)
    # Lazily created singleton client.
    _client = None

    @classmethod
    def load_cache(cls):
        """Return the shared tornado-redis client, creating it on first use."""
        if not cls._client:
            cls._client = tornadoredis.Client(connection_pool=cls.CONNECTION_POOL, **cache_para_dict)
        return cls._client
class RedisConnection(object):
    # Shared connection pool configured from cache_para_dict.
    CONNECTION_POOL = redis.ConnectionPool(**cache_para_dict)
    # Lazily created singleton client.
    _client = None

    @classmethod
    def load_cache(cls):
        """Return the shared redis client, creating it on first use."""
        if not cls._client:
            cls._client = redis.Redis(connection_pool=cls.CONNECTION_POOL)
        return cls._client
| 25.698413 | 101 | 0.673255 |
ace65715095666c60f3edf01e44795a5d2fe23ed | 13,790 | py | Python | faceAssistant.py | EscVM/Virtual_Security_Assistant | e0be2eec2599aaf6d74a0201fe62394d3e0e8dc6 | [
"MIT"
] | 13 | 2019-12-23T00:16:54.000Z | 2022-01-27T07:03:02.000Z | faceAssistant.py | EscVM/Virtual_Security_Assistant | e0be2eec2599aaf6d74a0201fe62394d3e0e8dc6 | [
"MIT"
] | null | null | null | faceAssistant.py | EscVM/Virtual_Security_Assistant | e0be2eec2599aaf6d74a0201fe62394d3e0e8dc6 | [
"MIT"
] | 4 | 2020-12-19T05:28:29.000Z | 2022-01-30T05:36:26.000Z | #--------------------------
#Date: 19/12/2019
#Place: Turin, PIC4SeR
#Author: Fra, Vitto
#Project: faceAssistant
#---------------------------
import os, time, datetime
import pickle, json
import numpy as np
import cv2
from textwrap import dedent
from faceNet.MyMQTT import MyMQTT
from faceNet.MyCam import MyCam
from faceNet.Timer import Timer
from faceNet.profileNet import profileNet
from faceNet.faceNet import faceNet
class FaceNet():
    """Face recognition assistant: grabs frames, detects/identifies faces,
    records sightings and reacts to MQTT camera-switch commands.
    """

    def __init__(self, conf_file="conf.json"):
        self.read_configuration(conf_file)
        print(self.logo)
        self.get_seen()  # get seen dictionary from file
        self.MQTT_initialize()
        try:
            # Embeddings database: {person: {"encodings": [...], "color": ...}}
            with open(self.data_file, "rb") as data_fh:
                self.database = pickle.loads(data_fh.read())
            print("[INFO] Embeddings file imported.\n")
        except Exception:
            raise FileNotFoundError("[Error] Encodings file not found. Generate it with 'imagesAcquisition.py'.")
        if self.classifier:  # knn model
            try:
                with open(self.classifier_model_path, "rb") as knn_fh:
                    self.knn_model = pickle.loads(knn_fh.read())
                print("[INFO] Knn classifier model imported.\n")
            except Exception:
                raise FileNotFoundError("[Error] Knn classifier model not found. Generate it with 'imagesAcquisition.py'.")
        self.cam = MyCam(self.cameras_file, self.default_camera)
        print("[INFO] Creating Tensorflow models...\n")
        self.profileNet = profileNet(self.profile_model)
        self.model = faceNet(self.bb_model, self.emb_model)
        self.counter = 0  # consecutive failed camera reads

    def run(self):
        """Main loop: grab, detect, annotate and display until ESC."""
        # Thread that deletes the old seen entries every day at midnight.
        self.del_old_thrd = Timer('deleteOld', "00.00", self.delete_old)
        self.del_old_thrd.start()
        cv2.namedWindow('Camera', cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty('Camera', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        self.ready = True
        self.frame = np.zeros((480, 640, 3), dtype='int8')  # default frame
        while True:
            if self.ready:
                img = self.get_frame()
                boxes, names = self.detect(img)
                if boxes.size:
                    self.update_seen(names)
                img = self.draw_boxes(boxes, names)
                if not self.show(img, 1):
                    break
            else:
                # Paused while a camera switch is in progress.
                time.sleep(0.5)
        self.cam.release()
        quit()

    def read_configuration(self, conf_file):
        """Load all tunables from the JSON configuration file."""
        with open(conf_file, 'r') as conf_fh:
            conf = json.loads(conf_fh.read())
        self.ROOT_DIR = os.path.abspath('')
        with open(conf["logo_file"]) as logo_fh:
            self.logo = logo_fh.read()
        self.data_file = conf["database_file"]
        self.seen_file = conf["seen_file"]
        self.cameras_file = conf["cameras_file"]
        self.default_camera = conf["default_camera_index"]
        self.bb_model = conf["bb_model"]
        self.emb_model = conf["emb_model"]
        self.profile_model = conf["profile_model"]
        self.classifier = conf["classifier"]
        if self.classifier:
            self.classifier_model_path = conf["classifier_model"]
        self.frame_width = conf["frame_max_width"]
        self.blur = conf["blur"]
        self.unknown_color = conf["unknown_color"]
        self.show_fps = conf["show_fps"]
        if self.show_fps:
            self.previous_prediction_time = time.time()
        self.line_width = conf["box_line_width"]
        self.font_dim = conf["font_dim"]
        self.MQTT_ID = conf["MQTT_ID"]
        self.MQTT_broker = conf["MQTT_broker"]
        self.MQTT_user = conf["MQTT_user"]
        self.MQTT_pwd = conf["MQTT_pwd"]
        self.MQTT_topic = conf["MQTT_topic"]

    def MQTT_initialize(self):
        """Connect to the broker and subscribe to every configured topic."""
        self.MQTTclient = MyMQTT(self.MQTT_ID, self.MQTT_broker, self.MQTT_user, self.MQTT_pwd, self.dispatch)
        self.MQTTclient.start()
        # wait for connection
        while not self.MQTTclient.is_connected:
            time.sleep(0.1)
        # subscribe
        for topic in self.MQTT_topic:
            self.MQTTclient.subscribe(topic)
        while sum(self.MQTTclient.is_subscribed) < len(self.MQTT_topic):
            time.sleep(0.1)

    def get_seen(self):
        """Load the per-day sightings dict from disk, or start fresh."""
        try:
            with open(self.seen_file, "r") as seen_fh:
                self.seen = json.loads(seen_fh.read())
        except Exception:
            print("[INFO] New seen.json file.\n")
            self.reset_seen()

    def reset_seen(self):
        self.seen = {"list": {}}  # generate empty seen dataframe
        self.update_seen()  # generate empty seen file

    def delete_old(self):
        """Drop every per-day entry older than 7 days."""
        maxdate = datetime.date.today() - datetime.timedelta(days=7)
        # BUG FIX 1: strptime lives on datetime.datetime, not on the module
        # ('import datetime' is used), and the result must be a date to
        # compare with maxdate.
        # BUG FIX 2: iterate over a snapshot of the keys — the original
        # shallow .copy() shared the inner "list" dict, so deleting while
        # iterating raised RuntimeError.
        for day in list(self.seen["list"].keys()):
            if datetime.datetime.strptime(day, "%Y %m %d").date() <= maxdate:
                del self.seen["list"][day]

    def dispatch(self, message):
        """
        Dispatch function for MQTT messages.
        :param message: MQTT message with JSON payload
        """
        topic = message.topic
        message = json.loads(message.payload.decode())
        print("[INFO] Received message on topic " + topic)
        if "camera" in topic:
            self.change_camera(message)

    def change_camera(self, message):
        """
        Change the camera.
        :param message: dictionary with 'camera' key -> camera ID to be selected, 't' key -> timestamp of the message
        """
        self.ready = False  # pause the main loop during the switch
        n = int(message.get('camera'))
        t = message.get('time')
        print("\n[INFO] " + time.ctime(t) + " Selected camera number " + str(n) + ".\n")
        self.cam.change(n)
        self.ready = True

    def get_frame(self):
        """
        Get the image from the camera object. If the frame read fails it uses
        the previous frame; after 10 consecutive failures it reattaches the
        camera. Sets self.frame (display frame) and returns the possibly
        downscaled frame used for detection.
        """
        frame = self.cam.read()
        if not frame[0]:
            if self.counter >= 10:
                self.cam.reattach()
                self.counter = -1
            frame = self.frame
            print('\n[INFO] Using previous frame.')
            self.counter += 1
        else:
            self.counter = 0
            frame = frame[1]
            if self.cam.flip:  # usually we want to flip webcams horizontally
                frame = cv2.flip(frame, 1)
        self.frame = frame  # frame that will be displayed
        if frame.shape[1] > self.frame_width:
            # Downscale for detection; self.r maps detection coords back.
            self.r = frame.shape[1] / self.frame_width
            frame = cv2.resize(frame, (self.frame_width, int(frame.shape[0] / self.r)), interpolation=cv2.INTER_AREA)
        else:
            self.r = 1
        return frame  # frame used as tensorflow input

    def detect(self, frame=None):
        """
        Execute the detection and recognition algorithms with an image.
        :param frame: The image.
        :return: A tuple with the detected bounding boxes and the associated names.
        """
        if frame is None:
            return (None, None)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Detect the (x, y)-coordinates of the bounding boxes corresponding
        # to each face in the input frame, then compute the facial embeddings.
        boxes, landmarks = self.model.get_face_locations(frame, self.cam.minsize, self.cam.factor, self.cam.thr)
        encodings = self.face_encodings(frame, boxes)
        names = []
        if self.classifier and not np.sum([1 for enc in encodings if enc is None]):
            # KNN model, only usable when every detected face is frontal.
            indexes = np.array([i for i in range(len(encodings)) if np.any(encodings[i])])
            closest_distances = self.knn_model.kneighbors(np.array(encodings)[indexes], n_neighbors=1)[0]
            are_matches = [closest_distances[int(np.where(indexes == i)[0])][0] <= self.cam.distance_thr if i in indexes
                           else None for i in range(len(encodings))]
            # Predict classes and drop classifications outside the threshold.
            names = [name if rec else "Unknown" for name, rec in zip(self.knn_model.predict(encodings), are_matches)]
            return (boxes, names)
        # euclidean distances
        for encoding in encodings:
            name = 'Unknown'  # default name
            if encoding is None:
                names.append(name)
                continue
            matches = {}
            # loop over the storage embeddings
            for db_name in self.database:
                person = self.database[db_name]
                person_match = self.model.compare_faces(person["encodings"], encoding, self.cam.distance_thr)
                matches[db_name] = sum(person_match)
            # get name of maximum match
            if matches[max(matches, key=matches.get)]:
                name = max(matches, key=matches.get)
            # update the list of names
            names.append(' '.join([i.capitalize() for i in name.split('_')]))
        return (boxes, names)

    def face_encodings(self, face_image, known_face_locations=None):
        """
        Given an image, return the face encoding for each face in the image.
        Side faces get no landmarks/encodings (None placeholders).
        :param face_image: The image that contains one or more faces.
        :param known_face_locations: The bounding boxes of each face.
        :return: An array of face encodings (None for non-frontal faces).
        """
        if not known_face_locations.size:
            return np.array([])
        faces = self.model.get_faces(self.frame, known_face_locations * self.r)
        norm_faces = self.profileNet.normalize(faces.copy())
        are_front = self.profileNet.predict(norm_faces, self.cam.profile_thr)
        return np.array([self.model.get_embeddings(faces[i:i + 1])[0] if are_front[i] else None
                         for i in range(len(known_face_locations))])

    def update_seen(self, names=None):
        """Record each name as seen today and persist the seen file."""
        date = datetime.date.today().strftime("%Y %m %d")
        if date not in self.seen["list"]:
            self.seen["list"][date] = []
        for name in (names or []):
            seen_names_list = [d['name'] for d in self.seen["list"][date]]
            if name not in seen_names_list:
                self.seen["list"][date].append({"name": name, "time": time.time()})
            else:
                index = seen_names_list.index(name)
                self.seen["list"][date][index]["time"] = time.time()
        # update seen file (context manager closes it even on write errors)
        self.seen["updated"] = time.time()
        with open(self.seen_file, "w+") as save_file:
            json.dump(self.seen, save_file)

    def draw_boxes(self, boxes=(), names=()):
        """Annotate self.frame with FPS and per-face boxes/names."""
        if self.frame is None:
            return None
        frame = self.frame.copy()
        if self.show_fps:  # write fps
            fps = 1. / (time.time() - self.previous_prediction_time)
            self.previous_prediction_time = time.time()
            text = "FPS: {:.2f}".format(fps)
            cv2.putText(frame, text, (5, 20), cv2.FONT_HERSHEY_SIMPLEX, self.font_dim, self.unknown_color, 2)  # top left corner
        # draw the faces boxes with names
        for ((left, top, right, bottom, conf), name) in zip(boxes, names):
            # Clamp to the frame, then rescale detection coords to full size.
            left = np.maximum(left, 0)
            top = np.maximum(top, 0)
            right = np.minimum(right, frame.shape[1])
            bottom = np.minimum(bottom, frame.shape[0])
            top = int(top * self.r)
            right = int(right * self.r)
            bottom = int(bottom * self.r)
            left = int(left * self.r)
            if not name == 'Unknown':
                color = self.database['_'.join(name.lower().split())]["color"]
            else:
                color = self.unknown_color
                if self.blur:  # blur unknown faces
                    name = ''  # no name
                    face_image = frame[top:bottom, left:right]
                    face_image = cv2.GaussianBlur(face_image, (99, 99), 30)
                    frame[top:bottom, left:right] = face_image
            cv2.rectangle(frame, (left, top), (right, bottom), color, self.line_width)
            y = top - 15 if top - 15 > 15 else top + 20
            cv2.putText(frame, name, (left + 5, y), cv2.FONT_HERSHEY_SIMPLEX, self.font_dim, color, 2)
        return frame

    def show(self, frame, delay=0):
        """
        Visualize the modified frame.
        :param frame: The frame to be displayed.
        :return: 0 if we want to stop the execution, 1 otherwise
        """
        cv2.imshow("Camera", frame)
        k = cv2.waitKey(delay) & 0xFF
        if k == 27:
            cv2.destroyAllWindows()
            return 0  # if the `esc` key was pressed, break from the loop
        elif k == ord('c'):
            self.cam.switch()
        return 1
# Entry point: build the face-recognition model and start its main loop.
if __name__ == "__main__":
    model = FaceNet()
    model.run()
| 37.069892 | 283 | 0.577302 |
ace6576598a74dd59317f07fcbdb5a295e857a1f | 84,229 | py | Python | bzt/modules/blazemeter.py | mryan43/taurus | 27e43d55b96a2b912470475d412faffafb9f62ae | [
"Apache-2.0"
] | null | null | null | bzt/modules/blazemeter.py | mryan43/taurus | 27e43d55b96a2b912470475d412faffafb9f62ae | [
"Apache-2.0"
] | null | null | null | bzt/modules/blazemeter.py | mryan43/taurus | 27e43d55b96a2b912470475d412faffafb9f62ae | [
"Apache-2.0"
] | null | null | null | """
Module for reporting into http://www.blazemeter.com/ service
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import logging
import os
import platform
import re
import sys
import time
import traceback
import zipfile
from abc import abstractmethod
from collections import defaultdict, OrderedDict, Counter, namedtuple
from functools import wraps
from ssl import SSLError
import requests
import yaml
from requests.exceptions import ReadTimeout
from terminaltables import SingleTable, AsciiTable
from urwid import Pile, Text
from bzt import AutomatedShutdown
from bzt import TaurusInternalException, TaurusConfigError, TaurusException, TaurusNetworkError, NormalShutdown
from bzt.bza import User, Session, Test, Workspace, MultiTest, BZA_TEST_DATA_RECEIVED
from bzt.engine import Reporter, Provisioning, ScenarioExecutor, Configuration, Service
from bzt.engine import Singletone, SETTINGS
from bzt.modules.aggregator import DataPoint, KPISet, ConsolidatingAggregator, ResultsProvider, AggregatorListener
from bzt.modules.console import WidgetProvider, PrioritizedWidget
from bzt.modules.functional import FunctionalResultsReader, FunctionalAggregator, FunctionalSample
from bzt.modules.monitoring import Monitoring, MonitoringListener, LocalClient
from bzt.modules.services import Unpacker
from bzt.modules.selenium import SeleniumExecutor
from bzt.requests_model import has_variable_pattern
from bzt.six import BytesIO, iteritems, HTTPError, r_input, URLError, b, string_types, text_type
from bzt.utils import open_browser, BetterDict, ExceptionalDownloader, ProgressBarContext
from bzt.utils import to_json, dehumanize_time, get_full_path, get_files_recursive, replace_in_config, humanize_bytes
# BlazeMeter test-type identifiers used when looking up or creating tests.
TAURUS_TEST_TYPE = "taurus"
FUNC_TEST_TYPE = "functionalApi"
# Config subtrees stripped before pushing the effective config to the cloud:
# local paths, credentials and CLI-only settings make no sense remotely.
# A value of True marks the whole subtree for removal.
CLOUD_CONFIG_BLACK_LIST = {
    "settings": {
        "artifacts-dir": True,
        "aggregator": True,
        "proxy": True,
        "check-updates": True
    },
    "included-configs": True,
    "cli": True,
    "cli-aliases": True,
    "install-id": True,
    "version": True,
    "modules": {
        "jmeter": {
            "path": True
        },
        "ab": {
            "path": True
        },
        "gatling": {
            "path": True
        },
        "grinder": {
            "path": True
        },
        "junit": {
            "path": True
        },
        "molotov": {
            "path": True
        },
        "siege": {
            "path": True
        },
        "testng": {
            "path": True
        },
        "tsung": {
            "path": True
        },
        "console": {
            "disable": True,
        },
        "blazemeter": {
            "address": True,
            "data-address": True,
            "token": True,
        },
        "cloud": {
            "address": True,
            "data-address": True,
            "token": True,
        },
    },
    "provisioning": True,
}
# Exception types treated as transient network failures (eligible for retry).
NETWORK_PROBLEMS = (IOError, URLError, SSLError, ReadTimeout, TaurusNetworkError)
# Maximum length of a session/master note accepted by BlazeMeter; longer text is truncated.
NOTE_SIZE_LIMIT = 2048
def send_with_retry(method):
    """Decorator: retry a failed BlazeMeterUploader upload exactly once.

    The first network failure is logged and the call repeated after the
    client timeout; if the retry fails too, the data is skipped so the
    run can continue.
    """
    @wraps(method)
    def _retried(self, *args, **kwargs):
        if not isinstance(self, BlazeMeterUploader):
            raise TaurusInternalException("send_with_retry should only be applied to BlazeMeterUploader methods")

        try:
            method(self, *args, **kwargs)
            return  # first attempt succeeded, nothing more to do
        except (IOError, TaurusNetworkError):
            self.log.debug("Error sending data: %s", traceback.format_exc())
            self.log.warning("Failed to send data, will retry in %s sec...", self._user.timeout)

        # single retry after a cool-down; give up (but keep running) on failure
        try:
            time.sleep(self._user.timeout)
            method(self, *args, **kwargs)
        except NETWORK_PROBLEMS:
            self.log.error("Fatal error sending data: %s", traceback.format_exc())
            self.log.warning("Will skip failed data and continue running")
        else:
            self.log.info("Succeeded with retry")

    return _retried
def get_with_retry(method):
    """Decorator: retry a CloudProvisioning request until it succeeds.

    Every transient network failure is logged and the call is repeated
    after the user timeout, indefinitely.
    """
    @wraps(method)
    def _persistent(self, *args, **kwargs):
        if not isinstance(self, CloudProvisioning):
            raise TaurusInternalException("get_with_retry should only be applied to CloudProvisioning class methods")

        while True:
            try:
                return method(self, *args, **kwargs)
            except NETWORK_PROBLEMS:
                self.log.debug("Error making request: %s", traceback.format_exc())
                self.log.warning("Failed to make request, will retry in %s sec...", self.user.timeout)
                time.sleep(self.user.timeout)

    return _persistent
def parse_blazemeter_test_link(link):
    """
    Extract account/workspace/project/test IDs from a BlazeMeter test URL.

    Example:
    https://a.blazemeter.com/app/#/accounts/97961/workspaces/89846/projects/229969/tests/5823512

    :param link: candidate URL
    :return: TestParams namedtuple of ints, or None when link doesn't match
    """
    if not isinstance(link, (string_types, text_type)):
        return None

    pattern = r'https://a.blazemeter.com/app/#/accounts/(\d+)/workspaces/(\d+)/projects/(\d+)/tests/(\d+)(?:/\w+)?'
    found = re.match(pattern, link)
    if found is None:
        return None

    TestParams = namedtuple('TestParams', 'account_id,workspace_id,project_id,test_id')
    ids = (int(part) for part in found.groups())
    return TestParams(*ids)
class BlazeMeterUploader(Reporter, AggregatorListener, MonitoringListener, Singletone):
    """
    Reporter class: streams KPI and monitoring data to BlazeMeter and
    uploads run artifacts when the test ends.

    :type _test: bzt.bza.Test
    :type _master: bzt.bza.Master
    :type _session: bzt.bza.Session
    """

    def __init__(self):
        super(BlazeMeterUploader, self).__init__()
        self.browser_open = 'start'  # when to open report in browser: start/end/both/none
        self.kpi_buffer = []  # DataPoints accumulated between dispatches
        self.send_interval = 30
        self._last_status_check = time.time()
        self.send_data = True
        self.upload_artifacts = True
        self.send_monitoring = True
        self.monitoring_buffer = None
        self.public_report = False
        self.last_dispatch = 0
        self.results_url = None
        self._user = User()
        self._test = None
        self._master = None
        self._session = None
        self.first_ts = sys.maxsize  # earliest seen KPI timestamp
        self.last_ts = 0  # latest seen KPI timestamp
        self.report_name = None
        self._dpoint_serializer = DatapointSerializer(self)

    def prepare(self):
        """
        Read options for uploading, check that they're sane
        """
        super(BlazeMeterUploader, self).prepare()
        self.send_interval = dehumanize_time(self.settings.get("send-interval", self.send_interval))
        self.send_monitoring = self.settings.get("send-monitoring", self.send_monitoring)
        monitoring_buffer_limit = self.settings.get("monitoring-buffer-limit", 500)
        self.monitoring_buffer = MonitoringBuffer(monitoring_buffer_limit, self.log)
        self.browser_open = self.settings.get("browser-open", self.browser_open)
        self.public_report = self.settings.get("public-report", self.public_report)
        self._dpoint_serializer.multi = self.settings.get("report-times-multiplier", self._dpoint_serializer.multi)
        token = self.settings.get("token", "")
        if not token:
            self.log.warning("No BlazeMeter API key provided, will upload anonymously")
        self._user.token = token

        # usual fields
        self._user.logger_limit = self.settings.get("request-logging-limit", self._user.logger_limit)
        self._user.address = self.settings.get("address", self._user.address).rstrip("/")
        self._user.data_address = self.settings.get("data-address", self._user.data_address).rstrip("/")
        self._user.timeout = dehumanize_time(self.settings.get("timeout", self._user.timeout))
        if isinstance(self._user.http_session, requests.Session):
            # replace the default session with the engine-wide configured client (proxies etc.)
            self.log.debug("Installing http client")
            self._user.http_session = self.engine.get_http_client()
            self._user.http_request = self._user.http_session.request

        # direct data feeding case: attach to an externally created session
        sess_id = self.parameters.get("session-id")
        if sess_id:
            self._session = Session(self._user, {'id': sess_id})
            self._session['userId'] = self.parameters.get("user-id", None)
            self._session['testId'] = self.parameters.get("test-id", None)
            self._test = Test(self._user, {'id': self._session['testId']})
            exc = TaurusConfigError("Need signature for session")
            self._session.data_signature = self.parameters.get("signature", exc)
            self._session.kpi_target = self.parameters.get("kpi-target", self._session.kpi_target)
            self.send_data = self.parameters.get("send-data", self.send_data)
            self.upload_artifacts = self.parameters.get("upload-artifacts", self.upload_artifacts)
        else:
            try:
                self._user.ping()  # to check connectivity and auth
            except HTTPError:
                self.log.error("Cannot reach online results storage, maybe the address/token is wrong")
                raise

            if token:
                wsp = self._user.accounts().workspaces()
                if not wsp:
                    raise TaurusNetworkError("Your account has no active workspaces, please contact BlazeMeter support")
                finder = ProjectFinder(self.parameters, self.settings, self._user, wsp, self.log)
                self._test = finder.resolve_external_test()
            else:
                self._test = Test(self._user, {'id': None})

        self.report_name = self.parameters.get("report-name", self.settings.get("report-name", self.report_name))
        if self.report_name == 'ask' and sys.stdin.isatty():
            self.report_name = r_input("Please enter report-name: ")

        if isinstance(self.engine.aggregator, ResultsProvider):
            self.engine.aggregator.add_listener(self)

        for service in self.engine.services:
            if isinstance(service, Monitoring):
                service.add_listener(self)

    def startup(self):
        """
        Initiate online test
        """
        super(BlazeMeterUploader, self).startup()
        self._user.log = self.log.getChild(self.__class__.__name__)

        if not self._session:
            url = self._start_online()
            self.log.info("Started data feeding: %s", url)
            if self.browser_open in ('start', 'both'):
                open_browser(url)

            if self._user.token and self.public_report:
                report_link = self._master.make_report_public()
                self.log.info("Public report link: %s", report_link)

    def _start_online(self):
        """
        Start online test session; returns the report URL.
        """
        self.log.info("Initiating data feeding...")

        if self._test['id']:
            self._session, self._master = self._test.start_external()
        else:
            self._session, self._master, self.results_url = self._test.start_anonymous_external_test()
            self._test['id'] = self._session['testId']

        if self._test.token:
            self.results_url = self._master.address + '/app/#/masters/%s' % self._master['id']

        if self.report_name:
            self._session.set({"name": str(self.report_name)})

        return self.results_url

    def __get_jtls_and_more(self):
        """
        Compress all artifacts and log files into a single in-memory zipfile.

        Files above the configured size limit are skipped (logs are always
        included regardless of size).

        :rtype: (bzt.six.BytesIO,dict)
        """
        mfile = BytesIO()
        listing = {}

        logs = set()
        for handler in self.engine.log.parent.handlers:
            if isinstance(handler, logging.FileHandler):
                logs.add(handler.baseFilename)

        max_file_size = self.settings.get('artifact-upload-size-limit', 10) * 1024 * 1024  # 10MB
        with zipfile.ZipFile(mfile, mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zfh:
            for root, _, files in os.walk(self.engine.artifacts_dir):
                for filename in files:
                    full_path = os.path.join(root, filename)
                    if full_path in logs:
                        logs.remove(full_path)  # will be added below unconditionally

                    fsize = os.path.getsize(full_path)
                    if fsize <= max_file_size:
                        zfh.write(full_path, os.path.join(os.path.relpath(root, self.engine.artifacts_dir), filename))
                        listing[full_path] = fsize
                    else:
                        msg = "File %s exceeds maximum size quota of %s and won't be included into upload"
                        self.log.warning(msg, filename, max_file_size)

            for filename in logs:  # upload logs unconditionally
                zfh.write(filename, os.path.basename(filename))
                listing[filename] = os.path.getsize(filename)

        return mfile, listing

    def __upload_artifacts(self):
        """
        If token provided, upload artifacts folder contents and bzt.log
        """
        if not self._session.token:
            return

        worker_index = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL')
        if worker_index:
            suffix = '-%s' % worker_index
        else:
            suffix = ''
        artifacts_zip = "artifacts%s.zip" % suffix
        mfile, zip_listing = self.__get_jtls_and_more()
        self.log.info("Uploading all artifacts as %s ...", artifacts_zip)
        self._session.upload_file(artifacts_zip, mfile.getvalue())
        self._session.upload_file(artifacts_zip + '.tail.bz', self.__format_listing(zip_listing))

        handlers = self.engine.log.parent.handlers
        for handler in handlers:
            if isinstance(handler, logging.FileHandler):
                fname = handler.baseFilename
                self.log.info("Uploading %s", fname)
                fhead, ftail = os.path.splitext(os.path.split(fname)[-1])
                modified_name = fhead + suffix + ftail
                with open(fname, 'rb') as _file:
                    self._session.upload_file(modified_name, _file.read())
                    # grab up to the last 4KB of the log as a quick preview;
                    # seek(-4096, 2) on a file shorter than 4KB raises OSError,
                    # so clamp the offset to the start of the file instead
                    log_size = _file.tell()
                    _file.seek(max(0, log_size - 4096))
                    tail = _file.read()
                    if b("\n") in tail:
                        # drop the first (likely truncated) line
                        tail = tail[tail.index(b("\n")) + 1:]
                    self._session.upload_file(modified_name + ".tail.bz", tail)

    def post_process(self):
        """
        Upload results if possible
        """
        if not self._session:
            self.log.debug("No feeding session obtained, nothing to finalize")
            return
        self.log.debug("KPI bulk buffer len in post-proc: %s", len(self.kpi_buffer))
        try:
            self.log.info("Sending remaining KPI data to server...")
            if self.send_data:
                self.__send_data(self.kpi_buffer, False, True)
                self.kpi_buffer = []

            if self.send_monitoring:
                self.__send_monitoring()
        finally:
            self._postproc_phase2()

        if self.results_url:
            if self.browser_open in ('end', 'both'):
                open_browser(self.results_url)
            self.log.info("Online report link: %s", self.results_url)

    def _postproc_phase2(self):
        """Upload artifacts, then wait for the server to confirm data receipt."""
        try:
            if self.upload_artifacts:
                self.__upload_artifacts()
        except (IOError, TaurusNetworkError):
            self.log.warning("Failed artifact upload: %s", traceback.format_exc())
        finally:
            self._last_status_check = self.parameters.get('forced-last-check', self._last_status_check)
            self.log.debug("Set last check time to: %s", self._last_status_check)

            tries = self.send_interval  # NOTE: you dirty one...
            while not self._last_status_check and tries > 0:
                self.log.info("Waiting for ping...")
                time.sleep(self.send_interval)
                tries -= 1

            self._postproc_phase3()

    def _postproc_phase3(self):
        """Stop the online session and attach the stopping reason as a note."""
        try:
            if self.send_data:
                self.end_online()

            if self._user.token and self.engine.stopping_reason:
                exc_class = self.engine.stopping_reason.__class__.__name__
                note = "%s: %s" % (exc_class, str(self.engine.stopping_reason))
                self.append_note_to_session(note)
                if self._master:
                    self.append_note_to_master(note)

        except KeyboardInterrupt:
            raise
        except BaseException as exc:
            self.log.debug("Failed to finish online: %s", traceback.format_exc())
            self.log.warning("Failed to finish online: %s", exc)

    def end_online(self):
        """
        Finish online test
        """
        if not self._session:
            self.log.debug("Feeding not started, so not stopping")
        else:
            self.log.info("Ending data feeding...")
            if self._user.token:
                self._session.stop()
            else:
                self._session.stop_anonymous()

    def append_note_to_session(self, note):
        """Append a note to the session, truncated to the server-side limit."""
        self._session.fetch()
        if 'note' in self._session:
            note = self._session['note'] + '\n' + note
        note = note.strip()
        if note:
            self._session.set({'note': note[:NOTE_SIZE_LIMIT]})

    def append_note_to_master(self, note):
        """Append a note to the master, truncated to the server-side limit."""
        self._master.fetch()
        if 'note' in self._master:
            note = self._master['note'] + '\n' + note
        note = note.strip()
        if note:
            self._master.set({'note': note[:NOTE_SIZE_LIMIT]})

    def check(self):
        """
        Send data if any in buffer
        """
        self.log.debug("KPI bulk buffer len: %s", len(self.kpi_buffer))
        if self.last_dispatch < (time.time() - self.send_interval):
            self.last_dispatch = time.time()
            if self.send_data and self.kpi_buffer:
                self.__send_data(self.kpi_buffer)
                self.kpi_buffer = []

            if self.send_monitoring:
                self.__send_monitoring()
        return super(BlazeMeterUploader, self).check()

    @send_with_retry
    def __send_data(self, data, do_check=True, is_final=False):
        """
        Serialize and ship a batch of datapoints to the server.

        :type data: list[bzt.modules.aggregator.DataPoint]
        """
        if not self._session:
            return

        serialized = self._dpoint_serializer.get_kpi_body(data, is_final)
        self._session.send_kpi_data(serialized, do_check)

    def aggregated_second(self, data):
        """
        Buffer an aggregated second for the next dispatch.

        :param data: DataPoint
        """
        if self.send_data:
            self.kpi_buffer.append(data)

    def monitoring_data(self, data):
        if self.send_monitoring:
            self.monitoring_buffer.record_data(data)

    @send_with_retry
    def __send_monitoring(self):
        engine_id = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL', '')
        if not engine_id:
            engine_id = "0"
        data = self.monitoring_buffer.get_monitoring_json(self._session)
        self._session.send_monitoring_data(engine_id, data)

    def __format_listing(self, zip_listing):
        """Produce a human-readable 'size  relative-path' listing of the zip contents."""
        lines = []
        for fname in sorted(zip_listing.keys()):
            bytestr = humanize_bytes(zip_listing[fname])
            if fname.startswith(self.engine.artifacts_dir):
                fname = fname[len(self.engine.artifacts_dir) + 1:]
            lines.append(bytestr + " " + fname)
        return "\n".join(lines)
class MonitoringBuffer(object):
    """Accumulates monitoring datapoints per source, merging adjacent
    intervals once a source exceeds ``size_limit`` points so memory
    usage stays bounded on long runs.
    """

    def __init__(self, size_limit, parent_log):
        self.size_limit = size_limit
        self.data = defaultdict(OrderedDict)
        self.log = parent_log.getChild(self.__class__.__name__)
        # data :: dict(datasource -> dict(interval -> datapoint))
        # datapoint :: dict(metric -> value)

    def record_data(self, data):
        """Merge a batch of monitoring items into the buffer and downsample
        any source that grew past the size limit.
        """
        for monitoring_item in data:
            item = copy.deepcopy(monitoring_item)  # don't mutate caller's dicts
            source = item.pop('source')
            timestamp = int(item['ts'])
            item['interval'] = 1
            buff = self.data[source]
            if timestamp in buff:
                buff[timestamp].update(item)
            else:
                buff[timestamp] = item

        sources = list(self.data)
        for source in sources:
            if len(self.data[source]) > self.size_limit:
                self._downsample(self.data[source])
            self.log.debug("Monitoring buffer size '%s': %s", source, len(self.data[source]))

    def _downsample(self, buff):
        # repeatedly merge neighbors with growing interval size until under limit
        size = 1
        while len(buff) > self.size_limit:
            self._merge_small_intervals(buff, size)
            size += 1

    def _merge_small_intervals(self, buff, size):
        # one pass: merge each point of interval <= size into its right neighbor
        timestamps = list(buff)
        merged_already = set()
        for left, right in zip(timestamps, timestamps[1:]):
            if left in merged_already:
                continue
            if buff[left]['interval'] <= size:
                self._merge_datapoints(buff[left], buff[right])
                buff.pop(right)
                merged_already.add(left)
                merged_already.add(right)

    @staticmethod
    def _merge_datapoints(left, right):
        # interval-weighted average of every metric; result kept in `left`
        sum_size = float(left['interval'] + right['interval'])
        for metric in set(right):
            if metric in ('ts', 'interval'):
                continue
            if metric in left:
                left[metric] = (left[metric] * left['interval'] + right[metric] * right['interval']) / sum_size
            else:
                left[metric] = right[metric]
        left['interval'] = sum_size

    def get_monitoring_json(self, session):
        """Build the BlazeMeter monitoring payload from buffered data.

        :type session: Session
        """
        results = {}
        hosts = []
        # BZA accepts only this fixed KPI set (previously the loop below also
        # accumulated encountered fields into `kpis`, but that was dead code:
        # it was unconditionally overwritten by this constant before return)
        kpis = {"Network I/O": "Network I/O", "Memory": "Memory", "CPU": "CPU", "Connections": "Connections"}

        for source, buff in iteritems(self.data):
            for timestamp, item in iteritems(buff):
                if source == 'local':
                    source = platform.node()

                if source not in results:
                    results[source] = {
                        "name": source,
                        "intervals": OrderedDict()
                    }

                if source not in hosts:
                    hosts.append(source)

                src = results[source]
                tstmp = timestamp * 1000
                tstmp_key = '%d' % tstmp

                if tstmp_key not in src['intervals']:
                    src['intervals'][tstmp_key] = {
                        "start": tstmp,
                        "duration": item['interval'] * 1000,
                        "indicators": {}
                    }

                for field, value in iteritems(item):
                    # translate local metric names into BZA indicator names
                    if field.lower().startswith('conn-all'):
                        field = 'Connections'
                    elif field.lower().startswith('cpu'):
                        field = 'CPU'
                    elif field.lower().startswith('mem'):
                        field = 'Memory'
                        value *= 100
                    elif field == 'bytes-recv' or field.lower().startswith('net'):
                        field = 'Network I/O'
                    elif field == 'engine-loop':
                        field = 'Busy Taurus'
                    else:
                        continue  # maybe one day BZA will accept all other metrics...

                    src['intervals'][tstmp_key]['indicators'][field] = {
                        "value": value,
                        "name": field,
                        "std": 0,
                        "mean": 0,
                        "sum": 0,
                        "min": 0,
                        "max": 0,
                        "sumOfSquares": 0,
                        "n": 1
                    }
        return {
            "reportInfo": {
                "sessionId": session['id'],
                "timestamp": time.time(),
                "userId": session['userId'],
                "testId": session['testId'],
                "type": "MONITOR",
                "testName": ""
            },
            "kpis": kpis,
            "hosts": hosts,
            "results": results
        }
class DatapointSerializer(object):
    """Converts Taurus DataPoints into the BlazeMeter KPI JSON format."""

    def __init__(self, owner):
        """
        :type owner: BlazeMeterUploader
        """
        super(DatapointSerializer, self).__init__()
        self.owner = owner
        self.multi = 1000  # miltiplier factor for reporting

    def get_kpi_body(self, data_buffer, is_final):
        # - reporting format:
        #   {labels: <data>,    # see below
        #   sourceID: <id of BlazeMeterClient object>,
        #   [is_final: True]}   # for last report
        #
        # - elements of 'data' are described in __get_label()
        #
        # - elements of 'intervals' are described in __get_interval()
        #   every interval contains info about response codes have gotten on it.
        report_items = BetterDict()
        if data_buffer:
            self.owner.first_ts = min(self.owner.first_ts, data_buffer[0][DataPoint.TIMESTAMP])
            self.owner.last_ts = max(self.owner.last_ts, data_buffer[-1][DataPoint.TIMESTAMP])

            # following data is received in the cumulative way
            for label, kpi_set in iteritems(data_buffer[-1][DataPoint.CUMULATIVE]):
                report_item = self.__get_label(label, kpi_set)
                self.__add_errors(report_item, kpi_set)  # 'Errors' tab
                report_items[label] = report_item

            # fill 'Timeline Report' tab with intervals data
            # intervals are received in the additive way
            for dpoint in data_buffer:
                time_stamp = dpoint[DataPoint.TIMESTAMP]
                for label, kpi_set in iteritems(dpoint[DataPoint.CURRENT]):
                    exc = TaurusInternalException('Cumulative KPISet is non-consistent')
                    report_item = report_items.get(label, exc)
                    report_item['intervals'].append(self.__get_interval(kpi_set, time_stamp))

        report_items = [report_items[key] for key in sorted(report_items.keys())]  # convert dict to list
        data = {"labels": report_items, "sourceID": id(self.owner)}
        if is_final:
            data['final'] = True

        return to_json(data)

    @staticmethod
    def __add_errors(report_item, kpi_set):
        """Distribute KPISet errors into the errors/assertions/embedded-resource buckets."""
        errors = kpi_set[KPISet.ERRORS]
        for error in errors:
            if error["type"] == KPISet.ERRTYPE_ERROR:
                report_item['errors'].append({
                    'm': error['msg'],
                    "rc": error['rc'],
                    "count": error['cnt'],
                })
            elif error["type"] == KPISet.ERRTYPE_SUBSAMPLE:
                report_item['failedEmbeddedResources'].append({
                    "count": error['cnt'],
                    "rm": error['msg'],
                    "rc": error['rc'],
                    "url": list(error['urls'])[0] if error['urls'] else None,
                })
            else:
                report_item['assertions'].append({
                    'failureMessage': error['msg'],
                    'name': error['tag'] if error['tag'] else 'All Assertions',
                    'failures': error['cnt']
                    # TODO: "count", "errors" = ? (according do Udi's format description)
                })

    def __get_label(self, name, cumul):
        """Build the per-label report skeleton (intervals/errors filled later)."""
        return {
            "n": cumul[KPISet.SAMPLE_COUNT],  # total count of samples
            "name": name if name else 'ALL',  # label
            "interval": 1,  # not used
            "intervals": [],  # list of intervals, fill later
            "samplesNotCounted": 0,  # not used
            "assertionsNotCounted": 0,  # not used
            "failedEmbeddedResources": [],  # not used
            "failedEmbeddedResourcesSpilloverCount": 0,  # not used
            "otherErrorsCount": 0,  # not used
            "errors": [],  # list of errors, fill later
            "assertions": [],  # list of assertions, fill later
            "percentileHistogram": [],  # not used
            "percentileHistogramLatency": [],  # not used
            "percentileHistogramBytes": [],  # not used
            "empty": False,  # not used
            "summary": self.__get_summary(cumul)  # summary info
        }

    def __get_summary(self, cumul):
        """Build cumulative summary for a label; times are scaled by self.multi."""
        n_samples = cumul[KPISet.SAMPLE_COUNT]
        return {
            "first": self.owner.first_ts,
            "last": self.owner.last_ts,
            "duration": self.owner.last_ts - self.owner.first_ts,
            "failed": cumul[KPISet.FAILURES],
            "hits": n_samples,
            "avg": int(self.multi * cumul[KPISet.AVG_RESP_TIME]),
            "min": int(self.multi * cumul[KPISet.PERCENTILES]["0.0"]) if "0.0" in cumul[KPISet.PERCENTILES] else 0,
            "max": int(self.multi * cumul[KPISet.PERCENTILES]["100.0"]) if "100.0" in cumul[KPISet.PERCENTILES] else 0,
            "std": int(self.multi * cumul[KPISet.STDEV_RESP_TIME]),
            "tp90": int(self.multi * cumul[KPISet.PERCENTILES]["90.0"]) if "90.0" in cumul[KPISet.PERCENTILES] else 0,
            "tp95": int(self.multi * cumul[KPISet.PERCENTILES]["95.0"]) if "95.0" in cumul[KPISet.PERCENTILES] else 0,
            "tp99": int(self.multi * cumul[KPISet.PERCENTILES]["99.0"]) if "99.0" in cumul[KPISet.PERCENTILES] else 0,
            "latencyAvg": int(self.multi * cumul[KPISet.AVG_LATENCY]),
            "latencyMax": 0,
            "latencyMin": 0,
            "latencySTD": 0,
            "bytes": cumul[KPISet.BYTE_COUNT],
            "bytesMax": 0,
            "bytesMin": 0,
            # guard against ZeroDivisionError for labels with no samples
            "bytesAvg": int(cumul[KPISet.BYTE_COUNT] / float(n_samples)) if n_samples else 0,
            "bytesSTD": 0,
            "otherErrorsSpillcount": 0,
        }

    def __get_interval(self, item, time_stamp):
        # rc_list - list of info about response codes:
        #   {'n': <number of code encounters>,
        #    'f': <number of failed request (e.q. important for assertions)>
        #    'rc': <string value of response code>}
        rc_list = []
        for r_code, cnt in iteritems(item[KPISet.RESP_CODES]):
            fails = [err['cnt'] for err in item[KPISet.ERRORS] if str(err['rc']) == r_code]
            rc_list.append({"n": cnt, 'f': fails, "rc": r_code})

        n_samples = item[KPISet.SAMPLE_COUNT]
        return {
            "ec": item[KPISet.FAILURES],
            "ts": time_stamp,
            "na": item[KPISet.CONCURRENCY],
            "n": n_samples,
            "failed": item[KPISet.FAILURES],
            "rc": rc_list,
            "t": {
                "min": int(self.multi * item[KPISet.PERCENTILES]["0.0"]) if "0.0" in item[KPISet.PERCENTILES] else 0,
                "max": int(self.multi * item[KPISet.PERCENTILES]["100.0"]) if "100.0" in item[
                    KPISet.PERCENTILES] else 0,
                "sum": self.multi * item[KPISet.AVG_RESP_TIME] * n_samples,
                "n": n_samples,
                "std": self.multi * item[KPISet.STDEV_RESP_TIME],
                "avg": self.multi * item[KPISet.AVG_RESP_TIME]
            },
            "lt": {
                "min": 0,
                "max": 0,
                "sum": self.multi * item[KPISet.AVG_LATENCY] * n_samples,
                "n": n_samples,
                "std": 0,
                "avg": self.multi * item[KPISet.AVG_LATENCY]
            },
            "by": {
                "min": 0,
                "max": 0,
                "sum": item[KPISet.BYTE_COUNT],
                "n": n_samples,
                "std": 0,
                # guard against ZeroDivisionError for empty intervals
                "avg": item[KPISet.BYTE_COUNT] / float(n_samples) if n_samples else 0
            },
        }
class ProjectFinder(object):
    """Resolves BlazeMeter account/workspace/project/test entities from
    Taurus config parameters, creating projects/tests when needed.

    :type user: User
    """

    def __init__(self, parameters, settings, user, workspaces, parent_log):
        super(ProjectFinder, self).__init__()
        self.default_test_name = "Taurus Test"
        self.parameters = parameters
        self.settings = settings
        self.log = parent_log.getChild(self.__class__.__name__)
        self.user = user
        self.workspaces = workspaces
        self.is_functional = False

    def _find_project(self, proj_name):
        """Look up a project by numeric ID or by name.

        :rtype: bzt.bza.Project
        """
        if isinstance(proj_name, (int, float)):  # TODO: what if it's string "123"?
            proj_id = int(proj_name)
            self.log.debug("Treating project name as ID: %s", proj_id)
            project = self.workspaces.projects(ident=proj_id).first()
            if not project:
                raise TaurusConfigError("BlazeMeter project not found by ID: %s" % proj_id)
            return project
        elif proj_name is not None:
            return self.workspaces.projects(name=proj_name).first()

        return None

    def _ws_proj_switch(self, project):
        # search scope: the resolved project, or all workspaces when none given
        if project:
            return project
        else:
            return self.workspaces

    def resolve_external_test(self):
        """Find or create an 'external' type test for direct data feeding."""
        proj_name = self.parameters.get("project", self.settings.get("project", None))
        test_name = self.parameters.get("test", self.settings.get("test", self.default_test_name))

        project = self._find_project(proj_name)
        if not project and proj_name:
            project = self._default_or_create_project(proj_name)

        test = self._ws_proj_switch(project).tests(name=test_name, test_type='external').first()

        if not test:
            if not project:
                project = self._default_or_create_project(proj_name)
            test = project.create_test(test_name, {"type": "external"})

        return test

    def resolve_account(self, account_name):
        """Resolve account by ID or name, falling back to the user's default account."""
        account = None

        if isinstance(account_name, (int, float)):  # TODO: what if it's string "123"?
            acc_id = int(account_name)
            self.log.debug("Treating account name as ID: %s", acc_id)
            account = self.user.accounts(ident=acc_id).first()
            if not account:
                raise TaurusConfigError("BlazeMeter account not found by ID: %s" % acc_id)
        elif account_name:
            account = self.user.accounts(name=account_name).first()
            if not account:
                raise TaurusConfigError("BlazeMeter account not found by name: %s" % account_name)

        if account:
            return account

        self.user.fetch()
        account = self.user.accounts(ident=self.user['defaultProject']['accountId']).first()
        self.log.debug("Using default account: %s", account)
        return account

    def resolve_workspace(self, account, workspace_name):
        """Resolve workspace by ID or name, falling back to the user's default workspace."""
        workspace = None

        if isinstance(workspace_name, (int, float)):  # TODO: what if it's string "123"?
            workspace_id = int(workspace_name)
            self.log.debug("Treating workspace name as ID: %s", workspace_id)
            workspace = account.workspaces(ident=workspace_id).first()
            if not workspace:
                raise TaurusConfigError("BlazeMeter workspace not found by ID: %s" % workspace_id)
        elif workspace_name is not None:
            workspace = account.workspaces(name=workspace_name).first()
            if not workspace:
                raise TaurusConfigError("BlazeMeter workspace not found: %s" % workspace_name)

        if workspace is None:
            workspace = account.workspaces(ident=self.user['defaultProject']['workspaceId']).first()
            self.log.debug("Using first workspace: %s" % workspace)

        return workspace

    def resolve_project(self, workspace, project_name):
        """Resolve project by ID or name, creating it (or using the default) when missing."""
        project = None

        if isinstance(project_name, (int, float)):  # TODO: what if it's string "123"?
            proj_id = int(project_name)
            self.log.debug("Treating project name as ID: %s", proj_id)
            project = workspace.projects(ident=proj_id).first()
            if not project:
                raise TaurusConfigError("BlazeMeter project not found by ID: %s" % proj_id)
        elif project_name is not None:
            project = workspace.projects(name=project_name).first()

        if project is None:
            project = self._create_project_or_use_default(workspace, project_name)

        return project

    def resolve_test(self, project, test_name, taurus_only=False):
        """Resolve a test (multi-test preferred) by ID or name within the project."""
        test = None
        is_int = isinstance(test_name, (int, float))
        is_digit = isinstance(test_name, (string_types, text_type)) and test_name.isdigit()

        if self.is_functional:
            test_type = FUNC_TEST_TYPE
        elif taurus_only:
            test_type = TAURUS_TEST_TYPE
        else:
            test_type = None

        if is_int or is_digit:
            test_id = int(test_name)
            self.log.debug("Treating project name as ID: %s", test_id)
            test = project.multi_tests(ident=test_id).first()
            if not test:
                test = project.tests(ident=test_id, test_type=test_type).first()
            if not test:
                raise TaurusConfigError("BlazeMeter test not found by ID: %s" % test_id)
        elif test_name is not None:
            test = project.multi_tests(name=test_name).first()
            if not test:
                test = project.tests(name=test_name, test_type=test_type).first()

        return test

    def resolve_test_type(self):
        """Resolve the full entity chain and wrap the test into the matching router class."""
        use_deprecated = self.settings.get("use-deprecated-api", True)
        default_location = self.settings.get("default-location", None)
        account_name = self.parameters.get("account", self.settings.get("account", None))
        workspace_name = self.parameters.get("workspace", self.settings.get("workspace", None))
        project_name = self.parameters.get("project", self.settings.get("project", None))
        test_name = self.parameters.get("test", self.settings.get("test", self.default_test_name))
        launch_existing_test = self.settings.get("launch-existing-test", False)
        send_report_email = self.settings.get("send-report-email", False)

        test_spec = parse_blazemeter_test_link(test_name)
        self.log.debug("Parsed test link: %s", test_spec)
        look_for_taurus_only = not launch_existing_test
        if test_spec is not None:
            # if we're to launch existing test - look for any type, otherwise - taurus only
            account, workspace, project, test = self.user.test_by_ids(test_spec.account_id, test_spec.workspace_id,
                                                                      test_spec.project_id, test_spec.test_id,
                                                                      taurus_only=look_for_taurus_only)
            if test is None:
                # interpolate into the message; previously the arg was passed
                # as a second exception argument and never formatted
                raise TaurusConfigError("Test not found: %s" % test_name)
            self.log.info("Found test by link: %s", test_name)
        else:
            account = self.resolve_account(account_name)
            workspace = self.resolve_workspace(account, workspace_name)
            project = self.resolve_project(workspace, project_name)
            test = self.resolve_test(project, test_name, taurus_only=look_for_taurus_only)

        if isinstance(test, MultiTest):
            self.log.debug("Detected test type: multi")
            test_class = CloudCollectionTest
        elif isinstance(test, Test):
            self.log.debug("Detected test type: standard")
            test_class = CloudTaurusTest
        else:
            if launch_existing_test:
                raise TaurusConfigError("Can't find test for launching: %r" % test_name)

            if use_deprecated or self.settings.get("cloud-mode") == 'taurusCloud':
                self.log.debug("Will create standard test")
                test_class = CloudTaurusTest
            else:
                self.log.debug("Will create a multi test")
                test_class = CloudCollectionTest

        assert test_class is not None
        router = test_class(self.user, test, project, test_name, default_location, launch_existing_test,
                            self.log)
        router._workspaces = self.workspaces
        router.cloud_mode = self.settings.get("cloud-mode", None)
        router.dedicated_ips = self.settings.get("dedicated-ips", False)
        router.is_functional = self.is_functional
        router.send_report_email = send_report_email
        return router

    def _create_project_or_use_default(self, workspace, proj_name):
        # named project -> create it; otherwise reuse (or create) the default project
        if proj_name:
            return workspace.create_project(proj_name)
        else:
            info = self.user.fetch()
            self.log.debug("Looking for default project: %s", info['defaultProject']['id'])
            project = self.workspaces.projects(ident=info['defaultProject']['id']).first()
            if not project:
                project = workspace.create_project("Taurus Tests Project")
            return project

    def _default_or_create_project(self, proj_name):
        # like _create_project_or_use_default, but scoped to the first workspace
        if proj_name:
            return self.workspaces.first().create_project(proj_name)
        else:
            info = self.user.fetch()
            project = self.workspaces.projects(ident=info['defaultProject']['id']).first()
            if not project:
                project = self.workspaces.first().create_project("Taurus Tests Project")
            return project
class BaseCloudTest(object):
    """
    Common state and workflow contract for a single cloud test run.
    Subclasses implement the concrete BlazeMeter API flavor: standard
    Taurus tests vs. deprecated collection-based multi tests.

    :type _user: bzt.bza.User
    :type _project: bzt.bza.Project
    :type _test: bzt.bza.Test
    :type master: bzt.bza.Master
    :type cloud_mode: str
    """

    def __init__(self, user, test, project, test_name, default_location, launch_existing_test, parent_log):
        self.default_test_name = "Taurus Test"
        self.log = parent_log.getChild(self.__class__.__name__)
        self.default_location = default_location
        self._test_name = test_name
        self._last_status = None  # most recent master status, cached by get_master_status()
        self._sessions = None  # cached session data used when rendering status text
        self._started = False  # set once the cloud engines have been told to start
        self._user = user
        self._project = project
        self._test = test
        self.launch_existing_test = launch_existing_test  # True: reuse remote test as-is, skip re-upload
        self.master = None  # set by launch_test() once the test is running
        self._workspaces = None
        self.cloud_mode = None
        self.dedicated_ips = False
        self.is_functional = False
        self.send_report_email = False

    @abstractmethod
    def prepare_locations(self, executors, engine_config):
        """Validate and assign cloud locations for each executor."""
        pass

    @abstractmethod
    def resolve_test(self, taurus_config, rfiles, delete_old_files=False):
        """Create or update the remote test entity from config and resource files."""
        pass

    @abstractmethod
    def launch_test(self):
        """launch cloud test"""
        pass

    @abstractmethod
    def start_if_ready(self):
        """start cloud test if all engines are ready"""
        pass

    @abstractmethod
    def get_test_status_text(self):
        """Return a human-readable status summary, or None if not available yet."""
        pass

    @abstractmethod
    def stop_test(self):
        """Stop or terminate the running cloud test."""
        pass

    def get_master_status(self):
        """Fetch the master's current status from the API and cache it."""
        self._last_status = self.master.get_status()
        return self._last_status
class CloudTaurusTest(BaseCloudTest):
    """Cloud test backed by a standard (single) BlazeMeter test entity."""

    def prepare_locations(self, executors, engine_config):
        """Validate requested locations against the workspace, assigning defaults.

        Per-execution location dicts win; otherwise global config locations are
        validated; executions with neither get the default location, weight 1.
        """
        available_locations = {}
        is_taurus4 = True
        workspace = Workspace(self._project, {'id': self._project['workspaceId']})
        for loc in workspace.locations(include_private=is_taurus4):
            available_locations[loc['id']] = loc

        if CloudProvisioning.LOC in engine_config and not is_taurus4:
            # NOTE: unreachable while is_taurus4 is hardcoded True; kept for the legacy API path
            self.log.warning("Deprecated test API doesn't support global locations")

        for executor in executors:
            if CloudProvisioning.LOC in executor.execution \
                    and isinstance(executor.execution[CloudProvisioning.LOC], dict):
                exec_locations = executor.execution[CloudProvisioning.LOC]
                self._check_locations(exec_locations, available_locations)
            elif CloudProvisioning.LOC in engine_config and is_taurus4:
                self._check_locations(engine_config[CloudProvisioning.LOC], available_locations)
            else:
                default_loc = self._get_default_location(available_locations)
                executor.execution[CloudProvisioning.LOC] = BetterDict.from_dict({default_loc: 1})
            executor.get_load()  # we need it to resolve load settings into full form

    def _get_default_location(self, available_locations):
        """Pick the configured default, else a non-functional sandbox, else the first location."""
        if self.default_location and self.default_location in available_locations:
            return self.default_location

        self.log.debug("Default location %s not found", self.default_location)

        for location_id in sorted(available_locations):
            location = available_locations[location_id]
            if location['sandbox'] and not location.get('purposes', {}).get('functional', False):
                return location_id

        if available_locations:
            location_id = sorted(available_locations.keys())[0]
            self.log.warning("Using first location as default: %s", location_id)
            return location_id

        self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys()))
        raise TaurusConfigError("No sandbox or default location available, please specify locations manually")

    def _check_locations(self, locations, available_locations):
        """Raise TaurusConfigError if any requested location is not available to this user."""
        for location in locations:
            if location not in available_locations:
                self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys()))
                raise TaurusConfigError("Invalid location requested: %s" % location)

    def resolve_test(self, taurus_config, rfiles, delete_old_files=False):
        """Create or reuse the remote test, then upload config and resource files.

        An existing test of the wrong type (functional vs. performance) is
        discarded and recreated rather than reused.
        """
        if self.launch_existing_test:
            return

        if self._test is not None:
            test_type = self._test.get("configuration").get("type")
            should_be_func = (self.is_functional and test_type != FUNC_TEST_TYPE)
            should_be_taurus = (not self.is_functional and test_type != TAURUS_TEST_TYPE)
            if should_be_func or should_be_taurus:
                self.log.debug("Can't reuse test type %r as Taurus test, will create new one", test_type)
                self._test = None

        if self._test is None:
            test_config = {
                "type": FUNC_TEST_TYPE if self.is_functional else TAURUS_TEST_TYPE,
                "plugins": {
                    "taurus": {
                        "filename": ""  # without this line it does not work
                    }
                }
            }
            self._test = self._project.create_test(self._test_name, test_config)

        if delete_old_files:
            self._test.delete_files()

        taurus_config = yaml.safe_dump(taurus_config, default_flow_style=False, explicit_start=True, canonical=False)
        self._test.upload_files(taurus_config, rfiles)
        self._test.update_props({'configuration': {'executionType': self.cloud_mode}})
        self._test.update_props({
            'configuration': {'plugins': {'reportEmail': {"enabled": self.send_report_email}}}
        })

    def launch_test(self):
        """Start the remote test and return the master's results page URL."""
        self.log.info("Initiating cloud test with %s ...", self._test.address)
        self.master = self._test.start(as_functional=self.is_functional)
        return self.master.address + '/app/#/masters/%s' % self.master['id']

    def start_if_ready(self):
        # standard tests start by themselves, so just record the fact
        self._started = True

    def stop_test(self):
        """Graceful stop once the test is running; terminate while still provisioning."""
        if self.master:
            self.log.info("Ending cloud test...")
            if not self._last_status:
                self.get_master_status()
            if self._last_status["progress"] >= 100:
                self.master.stop()
            else:
                self.master.terminate()

    def get_test_status_text(self):
        """Build a textual summary of agents per executor/scenario/location."""
        if not self._sessions:
            self._sessions = self.master.sessions()
            if not self._sessions:
                return

        mapping = BetterDict()  # dict(executor -> dict(scenario -> dict(location -> servers count)))
        for session in self._sessions:
            try:
                name_split = [part.strip() for part in session['name'].split('/')]
                location = session['configuration']['location']
                count = session['configuration']['serversCount']
                ex_item = mapping.get(name_split[0], force_set=True)
                if len(name_split) > 1:
                    name = name_split[1]
                else:
                    name = "N/A"
                ex_item.get(name, force_set=True)[location] = count
            except KeyError:
                self._sessions = None  # incomplete session data; drop cache and retry on next call

        txt = "%s #%s\n" % (self._test['name'], self.master['id'])
        for executor, scenarios in iteritems(mapping):
            txt += " %s" % executor
            for scenario, locations in iteritems(scenarios):
                txt += " %s:\n" % scenario
                for location, count in iteritems(locations):
                    txt += " Agents in %s: %s\n" % (location, count)

        return txt
class CloudCollectionTest(BaseCloudTest):
    """Cloud test backed by a deprecated collection-based (multi) test entity."""

    def prepare_locations(self, executors, engine_config):
        """Validate global and per-execution locations; default executions without any.

        If every execution carries its own locations, global locations are
        dropped from the config since they would have no effect.
        """
        available_locations = {}
        for loc in self._workspaces.locations(include_private=True):
            available_locations[loc['id']] = loc

        global_locations = engine_config.get(CloudProvisioning.LOC)
        self._check_locations(global_locations, available_locations)

        for executor in executors:
            if CloudProvisioning.LOC in executor.execution:
                exec_locations = executor.execution[CloudProvisioning.LOC]
                self._check_locations(exec_locations, available_locations)
            else:
                if not global_locations:
                    default_loc = self._get_default_location(available_locations)
                    executor.execution[CloudProvisioning.LOC] = BetterDict.from_dict({default_loc: 1})
            executor.get_load()  # we need it to resolve load settings into full form

        if global_locations and all(CloudProvisioning.LOC in executor.execution for executor in executors):
            self.log.warning("Each execution has locations specified, global locations won't have any effect")
            engine_config.pop(CloudProvisioning.LOC)

    def _get_default_location(self, available_locations):
        """Return the first sandbox location (sorted by id), or raise if none exists."""
        for location_id in sorted(available_locations):
            location = available_locations[location_id]
            if location['sandbox']:
                return location_id

        self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys()))
        raise TaurusConfigError("No sandbox or default location available, please specify locations manually")

    def _check_locations(self, locations, available_locations):
        """Raise TaurusConfigError if any requested location is not available to this user."""
        for location in locations:
            if location not in available_locations:
                self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys()))
                raise TaurusConfigError("Invalid location requested: %s" % location)

    def resolve_test(self, taurus_config, rfiles, delete_old_files=False):
        """Create or update the remote multi-test from a server-side collection draft."""
        self.log.warning("Using collection-based tests is deprecated in Taurus, will be removed in future versions")
        if self.launch_existing_test:
            return

        # TODO: handle delete_old_files ?
        if not self._project:
            raise TaurusInternalException()  # TODO: build unit test to catch this situation

        collection_draft = self._user.collection_draft(self._test_name, taurus_config, rfiles)
        if self._test is None:
            self.log.debug("Creating cloud collection test")
            self._test = self._project.create_multi_test(collection_draft)
        else:
            self.log.debug("Overriding cloud collection test")
            collection_draft['projectId'] = self._project['id']
            self._test.update_collection(collection_draft)

    def launch_test(self):
        """Start the remote multi-test and return the master's results page URL."""
        self.log.info("Initiating cloud test with %s ...", self._test.address)
        self.master = self._test.start()
        return self.master.address + '/app/#/masters/%s' % self.master['id']

    def start_if_ready(self):
        """Force-start the test once every session reports JMETER_CONSOLE_INIT."""
        if self._started:
            return
        if self._last_status is None:
            return
        sessions = self._last_status.get("sessions")
        if sessions and all(session["status"] == "JMETER_CONSOLE_INIT" for session in sessions):
            self.log.info("All servers are ready, starting cloud test")
            self.master.force_start()
            self._started = True

    def await_test_end(self):
        """Poll master status until it reports ENDED, for at most ~100 iterations."""
        iterations = 0
        while True:
            if iterations > 100:
                self.log.debug("Await: iteration limit reached")
                return
            status = self.master.get_status()
            if status.get("status") == "ENDED":
                return
            iterations += 1
            time.sleep(1.0)

    def stop_test(self):
        """Shut the test down via the test entity when started, else via the master."""
        if self._started and self._test:
            self.log.info("Shutting down cloud test...")
            self._test.stop()
            self.await_test_end()
        elif self.master:
            self.log.info("Shutting down cloud test...")
            self.master.stop()

    def get_test_status_text(self):
        """Build a textual summary of ready agents per scenario/location."""
        if not self._sessions:
            sessions = self.master.sessions()
            if not sessions:
                return
            self._sessions = {session["id"]: session for session in sessions}

        if not self._last_status:
            return

        mapping = BetterDict()  # dict(scenario -> dict(location -> servers count))
        for session_status in self._last_status["sessions"]:
            try:
                session_id = session_status["id"]
                session = self._sessions[session_id]
                location = session_status["locationId"]
                servers_count = len(session_status["readyStatus"]["servers"])
                name_split = [part.strip() for part in session['name'].split('/')]
                if len(name_split) > 1:
                    scenario = name_split[1]
                else:
                    scenario = "N/A"
                scenario_item = mapping.get(scenario, force_set=True)
                scenario_item.get(location, 0, force_set=True)
                scenario_item[location] += servers_count
            except (KeyError, TypeError):
                self._sessions = None  # incomplete session data; drop cache and retry on next call

        txt = "%s #%s\n" % (self._test['name'], self.master['id'])
        for scenario, locations in iteritems(mapping):
            txt += " %s:\n" % scenario
            for location, count in iteritems(locations):
                txt += " Agents in %s: %s\n" % (location, count)

        return txt
class MasterProvisioning(Provisioning):
    """Provisioning base that collects resource files and ships them to a remote master."""

    def get_rfiles(self):
        """Collect resource files from all executors.

        Files referenced by an executor but not mentioned anywhere in the
        serialized config can't be remapped remotely and raise a config error.
        Names containing variable patterns are passed through unchecked.
        """
        rfiles = []
        additional_files = []
        for executor in self.executors:
            executor_rfiles = executor.get_resource_files()
            config = to_json(self.engine.config.get('execution'))
            config += to_json(self.engine.config.get('scenarios'))
            config += to_json(executor.settings)
            for rfile in executor_rfiles:
                if has_variable_pattern(rfile):
                    continue
                if not os.path.exists(self.engine.find_file(rfile)):
                    raise TaurusConfigError("%s: resource file '%s' not found" % (executor, rfile))
                if to_json(rfile) not in config:  # TODO: might be check is needed to improve
                    additional_files.append(rfile)
            rfiles += executor_rfiles

        if additional_files:
            raise TaurusConfigError("Following files can't be handled in cloud: %s" % additional_files)

        rfiles = list(set(rfiles))  # deduplicate across executors
        rfiles = [x for x in rfiles if not has_variable_pattern(x)]
        self.log.debug("All resource files are: %s", rfiles)
        return rfiles

    def _fix_filenames(self, old_names):
        """Flatten resource paths to base names and rewrite the config to match.

        Directories are zipped first; equal base names coming from different
        directories are rejected to avoid silently overwriting data remotely.
        """
        # check for concurrent base names
        old_full_names = [self.engine.find_file(x) for x in old_names]
        rbases = [os.path.basename(get_full_path(rfile)) for rfile in old_full_names]
        rpaths = [get_full_path(rfile, step_up=1) for rfile in old_full_names]
        while rbases:
            base, path = rbases.pop(), rpaths.pop()
            if base in rbases:
                index = rbases.index(base)
                if path != rpaths[index]:
                    msg = 'Resource "%s" occurs more than one time, rename to avoid data loss'
                    raise TaurusConfigError(msg % base)

        old_full_names = self.__pack_dirs(old_full_names)
        new_base_names = [os.path.basename(f) for f in old_full_names]
        self.log.debug('Replace file names in config: %s with %s', old_names, new_base_names)
        replace_in_config(self.engine.config, old_names, new_base_names, log=self.log)
        old_full_names = list(set(old_full_names))
        return old_full_names

    def __pack_dirs(self, source_list):
        """Zip any directories in *source_list* and register them for remote unpacking."""
        result_list = []  # files for upload
        packed_list = []  # files for unpacking
        for source in source_list:
            source = get_full_path(source)
            if os.path.isfile(source):
                result_list.append(source)
            else:  # source is dir
                self.log.debug("Compress directory '%s'", source)
                base_dir_name = os.path.basename(source)
                zip_name = self.engine.create_artifact(base_dir_name, '.zip')
                relative_prefix_len = len(os.path.dirname(source))
                with zipfile.ZipFile(zip_name, 'w') as zip_file:
                    for _file in get_files_recursive(source):
                        zip_file.write(_file, _file[relative_prefix_len:])
                result_list.append(zip_name)
                packed_list.append(base_dir_name + '.zip')

        if packed_list:
            # schedule a local Unpacker service on the remote side to unzip the archives
            services = self.engine.config.get(Service.SERV, [], force_set=True)
            unpacker = BetterDict.from_dict({'module': Unpacker.UNPACK, Unpacker.FILES: packed_list, 'run-at': 'local'})
            services.append(unpacker)

        return result_list
class CloudProvisioning(MasterProvisioning, WidgetProvider):
    """
    Provisioning module that runs the test in the BlazeMeter cloud.

    :type user: bzt.bza.User
    :type results_reader: ResultsFromBZA
    :type router: BaseCloudTest
    :type _workspaces: bzt.bza.BZAObjectsList[bzt.bza.Workspace]
    """
    # config keys
    LOC = "locations"
    LOC_WEIGHTED = "locations-weighted"
    DEDICATED_IPS = "dedicated-ips"

    def __init__(self):
        super(CloudProvisioning, self).__init__()
        self.results_url = None  # link to the master's results page, set in startup()
        self.results_reader = None
        self.user = User()
        self.__last_master_status = None
        self.browser_open = 'start'  # when to open the browser: 'start', 'end', 'both', or anything else for never
        self.widget = None
        self.detach = False  # True: launch and exit, don't poll for results
        self.router = None  # BaseCloudTest subclass chosen by ProjectFinder
        self.test_ended = False
        self.check_interval = 5.0  # minimal seconds between status polls
        self._last_check_time = None
        self.public_report = False
        self.report_name = None
        self._workspaces = []
        self.launch_existing_test = None
        self.disallow_empty_execution = False

    @staticmethod
    def merge_with_blazemeter_config(module):
        """Overlay *module*'s settings on top of the 'blazemeter' module settings."""
        if 'blazemeter' not in module.engine.config.get('modules'):
            module.log.debug("Module 'blazemeter' wasn't found in base config")
            return
        bm_mod = module.engine.instantiate_module('blazemeter')
        bm_settings = copy.deepcopy(bm_mod.settings)
        bm_settings.update(module.settings)
        module.settings = bm_settings

    def prepare(self):
        """Resolve workspace/project/test, upload config and files, wire result readers."""
        CloudProvisioning.merge_with_blazemeter_config(self)
        CloudProvisioning.configure_client(self)
        self._workspaces = self.user.accounts().workspaces()
        if not self._workspaces:
            raise TaurusNetworkError("Your account has no active workspaces, please contact BlazeMeter support")

        self.__dump_locations_if_needed()

        super(CloudProvisioning, self).prepare()
        self.browser_open = self.settings.get("browser-open", self.browser_open)
        self.detach = self.settings.get("detach", self.detach)
        self.check_interval = dehumanize_time(self.settings.get("check-interval", self.check_interval))
        self.public_report = self.settings.get("public-report", self.public_report)
        # empty local execution implies launching a pre-existing remote test
        is_execution_empty = not self.engine.config.get("execution")
        self.launch_existing_test = self.settings.get("launch-existing-test", is_execution_empty, force_set=True)

        if not self.launch_existing_test:
            self._filter_reporting()

        finder = ProjectFinder(self.parameters, self.settings, self.user, self._workspaces, self.log)
        finder.default_test_name = "Taurus Cloud Test"
        finder.is_functional = self.engine.is_functional_mode()

        self.router = finder.resolve_test_type()

        if not self.launch_existing_test:
            self.router.prepare_locations(self.executors, self.engine.config)

            res_files = self.get_rfiles()
            files_for_cloud = self._fix_filenames(res_files)
            config_for_cloud = self.prepare_cloud_config()
            config_for_cloud.dump(self.engine.create_artifact("cloud", ""))
            del_files = self.settings.get("delete-test-files", True)
            self.router.resolve_test(config_for_cloud, files_for_cloud, del_files)

        self.report_name = self.settings.get("report-name", self.report_name)
        if self.report_name == 'ask' and sys.stdin.isatty():
            self.report_name = r_input("Please enter report-name: ")

        self.widget = self.get_widget()

        if isinstance(self.engine.aggregator, ConsolidatingAggregator):
            self.results_reader = ResultsFromBZA()
            self.results_reader.log = self.log
            self.engine.aggregator.add_underling(self.results_reader)
        elif isinstance(self.engine.aggregator, FunctionalAggregator):
            self.results_reader = FunctionalBZAReader(self.log)
            self.engine.aggregator.add_underling(self.results_reader)

    @staticmethod
    def _get_other_modules(config):
        """Return module aliases whose class is LocalClient or BlazeMeterUploader."""
        used_classes = LocalClient.__name__, BlazeMeterUploader.__name__
        used_modules = []

        for module in config.get("modules"):
            class_name = config.get("modules").get(module).get("class")
            if class_name and (class_name.split('.')[-1] in used_classes):
                used_modules.append(module)
        return used_modules

    def _get_executors(self):
        """Return executor module aliases, including selenium runners' inner executors."""
        executors = []
        for executor in self.executors:
            executors.append(executor.execution.get("executor"))
            if isinstance(executor, SeleniumExecutor):
                executors.append(executor.runner.execution.get("executor"))

        return executors

    def _filter_unused_modules(self, config, provisioning):
        """Strip modules the cloud run won't need and drop 'class' keys from the rest.

        Modules flagged with 'send-to-blazemeter' are always kept as-is.
        """
        services = [service.get("module") for service in config.get(Service.SERV)]
        reporters = [reporter.get("module") for reporter in config.get(Reporter.REP)]
        consolidator = config.get(SETTINGS).get("aggregator")

        used_modules = self._get_executors() + self._get_other_modules(config)
        used_modules += services + reporters + [consolidator, provisioning]

        modules = set(config.get("modules").keys())
        for module in modules:
            if config.get("modules")[module].get("send-to-blazemeter"):
                continue
            if module not in used_modules:
                del config.get("modules")[module]
            elif config.get("modules")[module].get("class"):
                del config.get("modules")[module]["class"]

    def prepare_cloud_config(self):
        """Build the config copy that is uploaded to the cloud.

        Expands load settings, strips unused modules and blacklisted keys,
        and resolves per-provisioning concurrency/throughput values.
        """
        # expand concurrency and throughput
        for executor in self.executors:
            executor.get_load()

        config = copy.deepcopy(self.engine.config)

        provisioning = config.get(Provisioning.PROV)
        self._filter_unused_modules(config, provisioning)

        # todo: should we remove config['version'] before sending to cloud?
        config['local-bzt-version'] = config.get('version', 'N/A')

        config.filter(CLOUD_CONFIG_BLACK_LIST, black_list=True)

        for execution in config[ScenarioExecutor.EXEC]:
            if execution.get("files") == []:
                del execution["files"]

            for param in (ScenarioExecutor.CONCURR, ScenarioExecutor.THRPT):
                # pick the value declared for this provisioning; drop the key otherwise
                param_value = execution.get(param).get(provisioning, None)
                if param_value is None:
                    del execution[param]
                else:
                    execution[param] = param_value

        if self.router.dedicated_ips:
            config[CloudProvisioning.DEDICATED_IPS] = True

        assert isinstance(config, Configuration)
        return config

    def __dump_locations_if_needed(self):
        """If 'dump-locations' is set, print available locations and shut down."""
        if self.settings.get("dump-locations", False):
            locations = {}
            for loc in self._workspaces.locations(include_private=True):
                locations[loc['id']] = loc

            data = [("ID", "Name")]
            for location_id in sorted(locations):
                location = locations[location_id]
                data.append((location_id, location['title']))
            table = SingleTable(data) if sys.stdout.isatty() else AsciiTable(data)
            self.log.warning("Dumping available locations instead of running the test:\n%s", table.table)
            raise NormalShutdown("Done listing locations")

    def _filter_reporting(self):
        """Drop explicit 'blazemeter' reporters; cloud runs report implicitly."""
        reporting = self.engine.config.get(Reporter.REP, [])
        new_reporting = []
        for index, reporter in enumerate(reporting):
            exc = TaurusConfigError("'module' attribute not found in %s" % reporter)
            cls = reporter.get('module', exc)
            if cls == 'blazemeter':
                self.log.warning("Explicit blazemeter reporting is skipped for cloud")
            else:
                new_reporting.append(reporter)

        self.engine.config[Reporter.REP] = new_reporting

    @staticmethod
    def configure_client(module):
        """Apply address/token/timeout settings to *module*'s BZA client; token is mandatory."""
        module.user.log = module.log
        module.user.logger_limit = module.settings.get("request-logging-limit", module.user.logger_limit)
        module.user.address = module.settings.get("address", module.user.address)
        module.user.token = module.settings.get("token", module.user.token)
        module.user.timeout = dehumanize_time(module.settings.get("timeout", module.user.timeout))
        if isinstance(module.user.http_session, requests.Session):
            module.log.debug("Installing http client")
            module.user.http_session = module.engine.get_http_client()
            module.user.http_request = module.user.http_session.request
        if not module.user.token:
            raise TaurusConfigError("You must provide API token to use cloud provisioning")

    def startup(self):
        """Launch the cloud test, optionally open the browser and publish the report."""
        super(CloudProvisioning, self).startup()
        self.results_url = self.router.launch_test()
        self.log.info("Started cloud test: %s", self.results_url)
        if self.results_url:
            if self.browser_open in ('start', 'both'):
                open_browser(self.results_url)

        if self.user.token and self.public_report:
            public_link = self.router.master.make_report_public()
            self.log.info("Public report link: %s", public_link)

        if self.report_name:
            self.router.master.set({"name": str(self.report_name)})

    def _should_skip_check(self):
        """Return True while we are still inside the configured check interval."""
        now = time.time()
        if self._last_check_time is None:
            return False
        elif now >= self._last_check_time + self.check_interval:
            return False
        else:
            return True

    def check(self):
        """Poll the master; return True once the cloud test is over (or detached)."""
        if self.detach:
            self.log.warning('Detaching Taurus from started test...')
            return True

        if self._should_skip_check():
            self.log.debug("Skipping cloud status check")
            return False

        self._last_check_time = time.time()

        master = self._check_master_status()

        if "status" in master and master['status'] != self.__last_master_status:
            self.__last_master_status = master['status']
            self.log.info("Cloud test status: %s", self.__last_master_status)

        if self.results_reader is not None and 'progress' in master and master['progress'] >= BZA_TEST_DATA_RECEIVED:
            self.results_reader.master = self.router.master

        if 'progress' in master and master['progress'] > BZA_TEST_DATA_RECEIVED:
            self.log.info("Test was stopped in the cloud: %s", master['status'])
            self.test_ended = True
            return True

        self.router.start_if_ready()

        self.widget.update()

        return super(CloudProvisioning, self).check()

    @get_with_retry
    def _check_master_status(self):
        return self.router.get_master_status()

    def post_process(self):
        """Stop the test if needed, then inspect final results for errors/thresholds.

        Raises TaurusException on session errors, AutomatedShutdown on failed
        thresholds or failed functional summary; downloads HARs if captured.
        """
        if not self.detach and self.router and not self.test_ended:
            self.router.stop_test()

        if self.results_url:
            if self.browser_open in ('end', 'both'):
                open_browser(self.results_url)

        if self.router and self.router.master:
            full = self.router.master.get_full()
            if 'note' in full and full['note']:
                self.log.warning("Cloud test has probably failed with message: %s", full['note'])

            for session in full.get('sessions', ()):
                for error in session.get("errors", ()):
                    raise TaurusException(to_json(error))

            if "hasThresholds" in full and full["hasThresholds"]:
                thresholds = self.router.master.get_thresholds()
                for item in thresholds.get('data', []):
                    if item.get('success', None) is False:
                        reason = None
                        for assertion in item.get('assertions', []):
                            if assertion.get('success', None) is False:
                                criterion = assertion.get('field', '')
                                label = assertion.get('label', '')
                                reason = "Cloud failure criterion %r (on label %r) was met" % (criterion, label)
                                break
                        if reason is None:
                            reason = "Cloud tests failed because failure criteria were met"
                        self.log.warning(reason)
                        raise AutomatedShutdown(reason)

            # if we have captured HARs, let's download them
            for service in self.engine.config.get(Service.SERV, []):
                mod = service.get('module', TaurusConfigError("No 'module' specified for service"))
                assert isinstance(mod, string_types), mod
                module = self.engine.instantiate_module(mod)
                if isinstance(module, ServiceStubCaptureHAR):
                    self._download_logs()
                    break

            if "functionalSummary" in full:
                summary = full["functionalSummary"]
                if summary is None or summary.get("isFailed", False):
                    raise AutomatedShutdown("Cloud tests failed")

    def _download_logs(self):
        """Download session logs into artifacts/cloud-artifacts, extracting HAR zips."""
        for session in self.router.master.sessions():
            assert isinstance(session, Session)
            for log in session.get_logs():
                self.log.info("Downloading %s from the cloud", log['filename'])
                cloud_dir = os.path.join(self.engine.artifacts_dir, 'cloud-artifacts')
                if not os.path.exists(cloud_dir):
                    os.makedirs(cloud_dir)
                dest = os.path.join(cloud_dir, log['filename'])
                dwn = ExceptionalDownloader(self.engine.get_http_client())
                with ProgressBarContext() as pbar:
                    try:
                        dwn.get(log['dataUrl'], dest, reporthook=pbar.download_callback)
                    except BaseException:
                        # best-effort download: log and continue with the next file
                        self.log.debug("Error is: %s", traceback.format_exc())
                        self.log.warning("Failed to download from %s", log['dataUrl'])
                        continue

                    if log['filename'].startswith('artifacts') and log['filename'].endswith('.zip'):
                        with zipfile.ZipFile(dest) as zipf:
                            for name in zipf.namelist():
                                ext = name.split('.')[-1].lower()
                                if ext in ('har', 'jpg', 'js', 'html', 'css'):
                                    self.log.debug("Extracting %s to %s", name, cloud_dir)
                                    zipf.extract(name, cloud_dir)

    def get_widget(self):
        """Lazily create the sidebar status widget."""
        if not self.widget:
            self.widget = CloudProvWidget(self.router)
        return self.widget
class ResultsFromBZA(ResultsProvider):
    """
    Pulls KPI and error data from a BZA master and converts it into DataPoints.

    :type master: bzt.bza.Master
    """

    def __init__(self, master=None):
        super(ResultsFromBZA, self).__init__()
        self.master = master
        self.min_ts = 0  # watermark: only KPIs newer than this timestamp are queried
        self.log = logging.getLogger('')
        self.prev_errors = BetterDict()  # error snapshot from the previous poll, for diffing
        self.cur_errors = BetterDict()
        self.handle_errors = True  # fetch error diff only once per _calculate_datapoints() call

    def _get_err_diff(self):
        """Return errors that appeared (or whose counts grew) since the previous poll."""
        # find diff of self.prev_errors and self.cur_errors
        diff = {}
        for label in self.cur_errors:
            if label not in self.prev_errors:
                diff[label] = self.cur_errors[label]
                continue

            for msg in self.cur_errors[label]:
                if msg not in self.prev_errors[label]:
                    prev_count = 0
                else:
                    prev_count = self.prev_errors[label][msg]['count']

                delta = self.cur_errors[label][msg]['count'] - prev_count
                if delta > 0:
                    if label not in diff:
                        diff[label] = {}
                    diff[label][msg] = {'count': delta, 'rc': self.cur_errors[label][msg]['rc']}

        return {k: diff[k] for k in diff if diff[k]}  # clean from empty items

    def _calculate_datapoints(self, final_pass=False):
        """Yield a DataPoint per timestamp found in the 'ALL' label's KPI data."""
        if self.master is None:
            return

        data, aggr_raw = self.query_data()
        aggr = {}
        for label in aggr_raw:
            aggr[label['labelName']] = label

        for label in data:
            if label.get('kpis') and not final_pass:
                label['kpis'].pop(-1)  # never take last second since it could be incomplete

        timestamps = []
        for label in data:
            if label.get('label') == 'ALL':
                timestamps.extend([kpi['ts'] for kpi in label.get('kpis', [])])

        self.handle_errors = True

        for tstmp in timestamps:
            point = DataPoint(tstmp)
            point[DataPoint.SOURCE_ID] = self.master['id']
            self.__generate_kpisets(aggr, data, point, tstmp)

            if self.handle_errors:
                self.handle_errors = False
                self.cur_errors = self.__get_errors_from_bza()
                err_diff = self._get_err_diff()
                if err_diff:
                    self.__add_err_diff(point, err_diff)
                    self.prev_errors = self.cur_errors

            point.recalculate()

            self.min_ts = point[DataPoint.TIMESTAMP] + 1
            yield point

    def __add_err_diff(self, point, err_diff):
        """Merge the error diff into *point*'s per-label KPISets."""
        for label in err_diff:
            point_label = '' if label == 'ALL' else label
            if point_label not in point[DataPoint.CURRENT]:
                self.log.warning("Got inconsistent kpi/error data for label: %s", point_label)
                kpiset = KPISet()
                point[DataPoint.CURRENT][point_label] = kpiset
                kpiset[KPISet.SAMPLE_COUNT] = sum([item['count'] for item in err_diff[label].values()])
            else:
                kpiset = point[DataPoint.CURRENT][point_label]

            kpiset[KPISet.ERRORS] = self.__get_kpi_errors(err_diff[label])
            kpiset[KPISet.FAILURES] = sum([x['cnt'] for x in kpiset[KPISet.ERRORS]])
            kpiset[KPISet.SAMPLE_COUNT] = kpiset[KPISet.SUCCESSES] + kpiset[KPISet.FAILURES]
            assert kpiset[KPISet.SAMPLE_COUNT] > 0, point_label

    def __generate_kpisets(self, aggr, data, point, tstmp):
        """Fill *point* with a KPISet per label whose KPI entry matches *tstmp*."""
        for label in data:
            for kpi in label.get('kpis', []):
                if kpi['ts'] != tstmp:
                    continue
                label_str = label.get('label')
                if label_str is None or label_str not in aggr:
                    self.log.warning("Skipping inconsistent data from API for label: %s", label_str)
                    continue

                if kpi['n'] <= 0:
                    self.log.warning("Skipping empty KPI item got from API: %s", kpi)
                    continue

                kpiset = self.__get_kpiset(aggr, kpi, label_str)
                point[DataPoint.CURRENT]['' if label_str == 'ALL' else label_str] = kpiset

    def __get_errors_from_bza(self):
        #
        # This method reads error report from BZA
        #
        # internal errors format:
        # <request_label>:
        #   <error_message>:
        #     'count': <count of errors>
        #     'rc': <response code>
        #
        result = {}
        try:
            errors = self.master.get_errors()
        except (URLError, TaurusNetworkError):
            # single retry after a timeout-length pause
            self.log.warning("Failed to get errors, will retry in %s seconds...", self.master.timeout)
            self.log.debug("Full exception: %s", traceback.format_exc())
            time.sleep(self.master.timeout)
            errors = self.master.get_errors()
            self.log.info("Succeeded with retry")

        for e_record in errors:
            _id = e_record["_id"]
            if _id == "ALL":
                _id = ""
            result[_id] = {}
            for error in e_record['errors']:
                result[_id][error['m']] = {'count': error['count'], 'rc': error['rc']}
            for assertion in e_record['assertions']:
                result[_id][assertion['failureMessage']] = {'count': assertion['failures'], 'rc': assertion['name']}
        return result

    def __get_kpi_errors(self, errors):
        """Convert internal error dicts into KPISet error items."""
        result = []
        for msg in errors:
            kpi_error = KPISet.error_item_skel(
                error=msg,
                ret_c=errors[msg]['rc'],
                cnt=errors[msg]['count'],
                errtype=KPISet.ERRTYPE_ERROR,  # TODO: what about asserts?
                urls=Counter(), tag=None)
            result.append(kpi_error)
        return result

    def __get_kpiset(self, aggr, kpi, label):
        """Build a KPISet from a single KPI record plus aggregate percentiles."""
        kpiset = KPISet()
        kpiset[KPISet.FAILURES] = kpi['ec']
        kpiset[KPISet.CONCURRENCY] = kpi['na']
        kpiset[KPISet.SAMPLE_COUNT] = kpi['n']
        assert kpi['n'] > 0 and kpi['n'] >= kpi['ec']
        kpiset[KPISet.SUCCESSES] = kpi['n'] - kpi['ec']
        kpiset.sum_rt += kpi['t_avg'] * kpi['n'] / 1000.0  # API reports milliseconds; KPISet keeps seconds
        kpiset.sum_lt += kpi['lt_avg'] * kpi['n'] / 1000.0
        perc_map = {'90line': 90.0, "95line": 95.0, "99line": 99.0}
        for field, level in iteritems(perc_map):
            kpiset[KPISet.PERCENTILES][str(level)] = aggr[label][field] / 1000.0
        return kpiset

    def query_data(self):
        """Fetch KPI and aggregate data from the master, retrying once on network errors."""
        try:
            data = self.master.get_kpis(self.min_ts)
        except (URLError, TaurusNetworkError):
            self.log.warning("Failed to get result KPIs, will retry in %s seconds...", self.master.timeout)
            self.log.debug("Full exception: %s", traceback.format_exc())
            time.sleep(self.master.timeout)
            data = self.master.get_kpis(self.min_ts)
            self.log.info("Succeeded with retry")

        try:
            aggr = self.master.get_aggregate_report()
        except (URLError, TaurusNetworkError):
            self.log.warning("Failed to get aggregate results, will retry in %s seconds...", self.master.timeout)
            self.log.debug("Full exception: %s", traceback.format_exc())
            time.sleep(self.master.timeout)
            aggr = self.master.get_aggregate_report()
            self.log.info("Succeeded with retry")

        return data, aggr
class FunctionalBZAReader(FunctionalResultsReader):
    """Reads functional test results from a BZA master on the final pass."""

    def __init__(self, parent_log, master=None):
        super(FunctionalBZAReader, self).__init__()
        self.master = master
        self.log = parent_log.getChild(self.__class__.__name__)

    @staticmethod
    def extract_samples_from_group(group, group_summary):
        """Yield a FunctionalSample per sample in *group*.

        Status is PASSED unless the sample has an error (FAILED) or a failed
        assertion (BROKEN, with the assertion's message as error_msg).
        """
        group_name = group_summary.get("name") or "Tests"
        for sample in group["samples"]:
            status = "PASSED"
            if sample["error"]:
                status = "FAILED"
            error_msg = ""
            error_trace = ""
            assertions = sample.get("assertions")
            if assertions:
                for assertion in assertions:
                    if assertion.get("isFailed"):
                        error_msg = assertion.get("errorMessage")
                        status = "BROKEN"
            rtm = sample.get("responseTime") or 0.0
            yield FunctionalSample(
                test_case=sample["label"],
                test_suite=group_name,
                status=status,
                start_time=int(sample["created"]),
                duration=rtm / 1000.0,  # API reports milliseconds
                error_msg=error_msg,
                error_trace=error_trace,
                extras={},
                subsamples=[],
            )

    def read(self, last_pass=False):
        """Yield FunctionalSamples; data is only available after the test ends (last_pass)."""
        if self.master is None:
            return

        if last_pass:
            try:
                groups = self.master.get_functional_report_groups()
            except (URLError, TaurusNetworkError):
                # single retry after a timeout-length pause
                self.log.warning("Failed to get test groups, will retry in %s seconds...", self.master.timeout)
                self.log.debug("Full exception: %s", traceback.format_exc())
                time.sleep(self.master.timeout)
                groups = self.master.get_functional_report_groups()
                self.log.info("Succeeded with retry")

            for group_summary in groups:
                group_id = group_summary['groupId']
                try:
                    group = self.master.get_functional_report_group(group_id)
                except (URLError, TaurusNetworkError):
                    self.log.warning("Failed to get test group, will retry in %s seconds...", self.master.timeout)
                    self.log.debug("Full exception: %s", traceback.format_exc())
                    time.sleep(self.master.timeout)
                    group = self.master.get_functional_report_group(group_id)
                    self.log.info("Succeeded with retry")

                for sample in self.extract_samples_from_group(group, group_summary):
                    yield sample
class CloudProvWidget(Pile, PrioritizedWidget):
    """Console widget that mirrors a cloud test's current status text."""

    def __init__(self, test):
        """
        :type test: BaseCloudTest
        """
        self.test = test
        self.text = Text("")
        super(CloudProvWidget, self).__init__([self.text])
        PrioritizedWidget.__init__(self)

    def update(self):
        """Refresh the displayed text; keeps the old text when no status is available."""
        status = self.test.get_test_status_text()
        if not status:
            return
        self.text.set_text(status)
class ServiceStubScreenshoter(Service):
    """Placeholder for the 'screenshoter' service; the real one only runs in the cloud."""

    def startup(self):
        # Warn (instead of failing) when the service is configured for local provisioning.
        if not isinstance(self.engine.provisioning, CloudProvisioning):
            self.log.warning("Stub for service 'screenshoter', use cloud provisioning to have it working")
class ServiceStubCaptureHAR(Service):
    """Placeholder for the 'capturehar' service; the real one only runs in the cloud."""

    def startup(self):
        # Warn (instead of failing) when the service is configured for local provisioning.
        if not isinstance(self.engine.provisioning, CloudProvisioning):
            self.log.warning("Stub for service 'capturehar', use cloud provisioning to have it working")
| 41.047271 | 120 | 0.592338 |
ace657a21cbc1166b8dcefa770f3f45939d6d167 | 2,498 | py | Python | test/sagemaker_tests/tensorflow/tensorflow2_training/integration/sagemaker/test_tuning_model_dir.py | whn09/deep-learning-containers | 45493c141ad7bcadc4293f3efa5b1ad983f942b7 | [
"Apache-2.0"
] | 383 | 2020-05-19T18:09:10.000Z | 2022-03-29T22:41:05.000Z | test/sagemaker_tests/tensorflow/tensorflow2_training/integration/sagemaker/test_tuning_model_dir.py | nam-nd-d3/deep-learning-containers | 4c03f5cf7eda3276c6a03516e6823872bd8eeb42 | [
"Apache-2.0"
] | 551 | 2020-05-27T17:25:50.000Z | 2022-03-31T18:00:35.000Z | test/sagemaker_tests/tensorflow/tensorflow2_training/integration/sagemaker/test_tuning_model_dir.py | ashahba/deep-learning-containers | 48c3948b3d11f4fe2aac6bb25e5d82230d777076 | [
"Apache-2.0"
] | 263 | 2020-05-19T18:17:12.000Z | 2022-03-29T22:41:10.000Z | # Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import pytest
from sagemaker.tensorflow import TensorFlow
from sagemaker.tuner import HyperparameterTuner, IntegerParameter
from ..... import invoke_sm_helper_function
from ...integration.utils import processor, py_version, unique_name_from_base # noqa: F401
@pytest.mark.integration("hpo")
@pytest.mark.model("N/A")
def test_model_dir_with_training_job_name(ecr_image, sagemaker_regions, instance_type, framework_version):
    # Delegate to the shared helper so the test body can be retried across
    # the candidate SageMaker regions.
    invoke_sm_helper_function(ecr_image, sagemaker_regions, _test_model_dir_with_training_job_name_function,
                              instance_type, framework_version)
def _test_model_dir_with_training_job_name_function(ecr_image, sagemaker_session, instance_type, framework_version):
    """Run a single-job hyperparameter tuning round with a trivial metric.

    The entry script (resources/tuning_model_dir/entry.py) is responsible for
    asserting that SageMaker passes the expected ``model_dir`` through to the
    training container; this function only wires up and launches the job.
    """
    resource_path = os.path.join(os.path.dirname(__file__), '../..', 'resources')
    script = os.path.join(resource_path, 'tuning_model_dir', 'entry.py')
    estimator = TensorFlow(entry_point=script,
                           role='SageMakerRole',
                           instance_type=instance_type,
                           instance_count=1,
                           image_uri=ecr_image,
                           framework_version=framework_version,
                           py_version='py3',
                           sagemaker_session=sagemaker_session)
    # The hyperparameter range is arbitrary -- the tuner just needs something
    # to vary; only one job runs, so only one value is ever tried.
    tuner = HyperparameterTuner(estimator=estimator,
                                objective_metric_name='accuracy',
                                hyperparameter_ranges={'arbitrary_value': IntegerParameter(0, 1)},
                                metric_definitions=[{'Name': 'accuracy', 'Regex': 'accuracy=([01])'}],
                                max_jobs=1,
                                max_parallel_jobs=1)
    # User script has logic to check for the correct model_dir
    tuner.fit(job_name=unique_name_from_base('test-tf-model-dir', max_length=32))
    tuner.wait()
| 44.607143 | 116 | 0.670937 |
ace657ea5b3a746394c2ac9021c3dbd52a183967 | 385 | py | Python | LaccProj/webapp/migrations/0005_nominee_sat_scores.py | delaware2017/team-12 | 738b2c5d3936d5d26ba79f36ef8634a8eca48b28 | [
"MIT"
] | null | null | null | LaccProj/webapp/migrations/0005_nominee_sat_scores.py | delaware2017/team-12 | 738b2c5d3936d5d26ba79f36ef8634a8eca48b28 | [
"MIT"
] | null | null | null | LaccProj/webapp/migrations/0005_nominee_sat_scores.py | delaware2017/team-12 | 738b2c5d3936d5d26ba79f36ef8634a8eca48b28 | [
"MIT"
] | null | null | null | # Generated by Django 2.1 on 2017-11-04 13:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an integer SAT_Scores column to the Nominee model."""

    # Must apply after the previous webapp migration.
    dependencies = [
        ('webapp', '0004_auto_20171104_1332'),
    ]

    operations = [
        # Existing rows get a default score of 0.
        migrations.AddField(
            model_name='nominee',
            name='SAT_Scores',
            field=models.IntegerField(default=0),
        ),
    ]
| 20.263158 | 49 | 0.6 |
ace658354c6f8aa20619312571300d6404b023bd | 1,724 | py | Python | crichtonweb/cli/crichtoncli/commands/common.py | bpluly/crichton | a2fa09c181ba1e44ee1aae7a57769e1778de7f3a | [
"Apache-2.0"
] | null | null | null | crichtonweb/cli/crichtoncli/commands/common.py | bpluly/crichton | a2fa09c181ba1e44ee1aae7a57769e1778de7f3a | [
"Apache-2.0"
] | null | null | null | crichtonweb/cli/crichtoncli/commands/common.py | bpluly/crichton | a2fa09c181ba1e44ee1aae7a57769e1778de7f3a | [
"Apache-2.0"
] | null | null | null | # Crichton, Admirable Source Configuration Management
# Copyright 2012 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Common utility code for commands
def print_table(table):
    """
    Takes a list of arrays and prints them neatly so the columns line up in preformatted text.
    The first line should be the headings, a row of underlines will automatically be added.
    See commands/pendingpackages.py for example of usage.
    """
    # NOTE: this module is Python 2 code (bare `print` statements below).
    def _str(line):
        return [str(x) for x in line]
    # turn table of strings + objects into one only of strings
    strtable = [_str(line) for line in table]
    # calculate the column widths
    colwidth = [0 for x in table[0]]
    SPACE=2
    for line in strtable:
        i = 0
        for col in line:
            mylen = len(col)
            # NOTE(review): with SPACE=2 this expression can *shrink* mylen
            # (e.g. len 3 -> 2 before the +SPACE below); presumably the intent
            # was to round the width up to a multiple of 4 -- TODO confirm.
            mylen += (SPACE-mylen%4)
            mylen += SPACE
            if colwidth[i] < mylen:
                colwidth[i] = mylen
            i += 1
    # one left-aligned, fixed-width conversion per column
    fmtstr = "".join(["%%-%ss" % cw for cw in colwidth])
    maxlen = sum(colwidth)
    print fmtstr % tuple(strtable[0])
    # underline row separating the headings from the data
    print "_" * maxlen
    for line in strtable[1:]:
        print fmtstr % tuple(line)
# eof
| 31.925926 | 94 | 0.650812 |
ace6593a04f8d328f426e96e6bc751aa14c903f0 | 2,568 | py | Python | oblivion_iso_mounter/mounter.py | hellozyemlya/mo2-oblivion-iso-mounter | 3bf2ac1c5a84da5b7ab3be72c2752bcdd4f75915 | [
"MIT"
] | null | null | null | oblivion_iso_mounter/mounter.py | hellozyemlya/mo2-oblivion-iso-mounter | 3bf2ac1c5a84da5b7ab3be72c2752bcdd4f75915 | [
"MIT"
] | null | null | null | oblivion_iso_mounter/mounter.py | hellozyemlya/mo2-oblivion-iso-mounter | 3bf2ac1c5a84da5b7ab3be72c2752bcdd4f75915 | [
"MIT"
] | null | null | null | import base64
from typing import List
from PyQt5.QtCore import qCritical, QCoreApplication, qInfo
from PyQt5.QtWidgets import QWidget
from mobase import IOrganizer, PluginSetting, VersionInfo, IPlugin
import subprocess
class OblivionIsoMounter(IPlugin):
    """Mod Organizer 2 plugin that mounts the configured Oblivion game ISO
    right before the game launches (needed for localized disc-check releases)."""

    def __init__(self):
        super().__init__()
        self._organizer: IOrganizer = None
        self._parent: QWidget = None

    def author(self) -> str:
        return "hellozyemlya"

    def description(self) -> str:
        return "Automatically mounts Oblivion game ISO image for localized legal distributions."

    def init(self, organizer: IOrganizer) -> bool:
        self._organizer = organizer
        # Hook every application launch so the ISO can be mounted first.
        if not self._organizer.onAboutToRun(lambda app_name: self._do_mount_iso(app_name)):
            qCritical("Unable to register start callback.")
            return False
        return True

    def name(self) -> str:
        return "Oblivion ISO Mounter"

    def settings(self) -> List[PluginSetting]:
        return [
            PluginSetting("enabled", self.__tr("Enable automatic image mounting"), False),
            PluginSetting("isoPath", self.__tr("ISO image path"), "")
        ]

    def version(self) -> VersionInfo:
        return VersionInfo(0, 0, 0, 1)

    def __tr(self, str_):
        # Route UI strings through Qt's translation machinery.
        return QCoreApplication.translate("OblivionIsoMounter", str_)

    @property
    def _iso_path(self) -> str:
        # The ISO path as configured in the plugin settings.
        return self._organizer.pluginSetting(self.name(), 'isoPath')

    def _is_oblivion(self, app_name: str) -> bool:
        # Act only when the plugin is enabled, oblivion.exe is being launched
        # and the managed game actually is Oblivion.
        return self._organizer.pluginSetting(self.name(), "enabled") and app_name.lower().endswith(
            "oblivion.exe") and "oblivion" in self._organizer.managedGame().name().lower()

    def _do_mount_iso(self, app_name: str) -> bool:
        """Mount the configured ISO when Oblivion starts; always allow the launch."""
        if self._is_oblivion(app_name):
            qInfo(f"Mounting Oblivion ISO: {self._iso_path}")
            res = subprocess.run(
                [
                    "powershell.exe",
                    "-WindowStyle", "Hidden",
                    "-NoProfile",
                    "-NonInteractive",
                    "-EncodedCommand", self._iso_mount_command()
                ],
                # BUG FIX: capture stdout -- without stdout=PIPE, res.stdout is
                # None and the .decode() below raised AttributeError on failure.
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
            if res.returncode != 0:
                qCritical(res.stdout.decode())
            else:
                return True
        return True

    def _iso_mount_command(self) -> str:
        cmd = f'if(!(Get-DiskImage -ImagePath "{self._iso_path}").Attached){{Mount-DiskImage -ImagePath "{self._iso_path}"}}'
        # BUG FIX: PowerShell's -EncodedCommand requires a single-line base64
        # string of the UTF-16-LE command.  base64.encodebytes() wraps its
        # output with a newline every 76 characters, producing an argument
        # PowerShell rejects; b64encode() emits one unbroken line.
        return base64.b64encode(cmd.encode("utf-16-le")).decode()
ace659979cba41582c900b6b32cdd87b72e94161 | 16,110 | py | Python | project/mobile_face/retinaface.py | delldu/MobileFaceNet | a10b30eb707de0875b732a7158c3635abc090549 | [
"Apache-2.0"
] | 1 | 2022-01-15T21:36:57.000Z | 2022-01-15T21:36:57.000Z | project/mobile_face/retinaface.py | delldu/MobileFaceNet | a10b30eb707de0875b732a7158c3635abc090549 | [
"Apache-2.0"
] | null | null | null | project/mobile_face/retinaface.py | delldu/MobileFaceNet | a10b30eb707de0875b732a7158c3635abc090549 | [
"Apache-2.0"
] | null | null | null | import os
import math
from itertools import product as product
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models._utils as utils
from torchvision import transforms as T
import pdb
class PriorBox(object):
    """Generates SSD-style prior (anchor) boxes for an H x W RetinaFace input.

    Three pyramid levels (strides 8/16/32) each carry two anchor sizes; every
    feature-map cell yields one anchor per size, as (cx, cy, w, h) normalised
    by the input width/height.
    """

    def __init__(self, H=256, W=256):
        super(PriorBox, self).__init__()
        self.min_sizes = [[16, 32], [64, 128], [256, 512]]
        self.steps = [8, 16, 32]
        self.H = H
        self.W = W
        # one feature map per stride level, sized ceil(dim / stride)
        self.feature_maps = [[math.ceil(self.H / step), math.ceil(self.W / step)] for step in self.steps]

    def forward(self):
        """Return an (N, 4) tensor of priors as (cx, cy, w, h)."""
        coords = []
        for level, (rows, cols) in enumerate(self.feature_maps):
            step = self.steps[level]
            for row, col in product(range(rows), range(cols)):
                # anchor centre, normalised to the input size
                cx = (col + 0.5) * step / self.W
                cy = (row + 0.5) * step / self.H
                for size in self.min_sizes[level]:
                    coords.extend([cx, cy, size / self.W, size / self.H])
        return torch.Tensor(coords).view(-1, 4)
# Adapted from https://github.com/Hakuyume/chainer-ssd
def decode(loc, priors, variances=[0.1, 0.2]):
    """Decode predicted box regressions against priors.

    :param loc: (N, 4) regression offsets
    :param priors: (N, 4) priors as (cx, cy, w, h)
    :param variances: scaling factors for centre and size offsets
    :return: (N, 4) boxes as (x1, y1, x2, y2)
    """
    centers = priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:]
    sizes = priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])
    half = sizes / 2
    # corner form: top-left and bottom-right
    return torch.cat((centers - half, centers + half), 1)
def decode_landm(pre, priors, variances=[0.1, 0.2]):
    """Decode predicted 5-point landmark regressions against priors.

    :param pre: (N, 10) landmark offsets, five (x, y) pairs
    :param priors: (N, 4) priors as (cx, cy, w, h)
    :return: (N, 10) absolute landmark coordinates
    """
    # each of the 5 points is the prior centre shifted by a scaled offset
    points = [
        priors[:, :2] + pre[:, 2 * i:2 * i + 2] * variances[0] * priors[:, 2:]
        for i in range(5)
    ]
    return torch.cat(points, dim=1)
def py_cpu_nms(dets, thresh):
    """Pure-Python non-maximum suppression.

    :param dets: (N, 5) array of (x1, y1, x2, y2, score)
    :param thresh: IoU threshold above which a lower-scored box is dropped
    :return: list of kept row indices, highest score first
    """
    x1, y1, x2, y2, scores = (dets[:, i] for i in range(5))
    # +1 treats coordinates as inclusive pixel indices
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]
        # intersection of the best box with every remaining box
        ix1 = np.maximum(x1[best], x1[rest])
        iy1 = np.maximum(y1[best], y1[rest])
        ix2 = np.minimum(x2[best], x2[rest])
        iy2 = np.minimum(y2[best], y2[rest])
        iw = np.maximum(0.0, ix2 - ix1 + 1)
        ih = np.maximum(0.0, iy2 - iy1 + 1)
        inter = iw * ih
        iou = inter / (areas[best] + areas[rest] - inter)
        # keep only boxes that overlap the best one weakly enough
        order = rest[np.where(iou <= thresh)[0]]
    return keep
def conv_bn(inp, oup, stride=1, leaky=0):
    """3x3 conv (padding 1, no bias) -> BatchNorm -> LeakyReLU block."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    ]
    return nn.Sequential(*layers)
def conv_bn_no_relu(inp, oup, stride):
    """3x3 conv (padding 1, no bias) -> BatchNorm, without an activation."""
    # numeric module names keep state_dict keys identical to nn.Sequential(...)
    block = nn.Sequential()
    block.add_module("0", nn.Conv2d(inp, oup, 3, stride, 1, bias=False))
    block.add_module("1", nn.BatchNorm2d(oup))
    return block
def conv_bn1X1(inp, oup, stride, leaky=0):
    """1x1 conv (no padding, no bias) -> BatchNorm -> LeakyReLU block."""
    modules = (
        nn.Conv2d(inp, oup, 1, stride, padding=0, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    )
    return nn.Sequential(*modules)
def conv_dw(inp, oup, stride, leaky=0.1):
    """Depthwise-separable block: depthwise 3x3 then pointwise 1x1 conv,
    each followed by BatchNorm + LeakyReLU."""
    depthwise = [
        nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
        nn.BatchNorm2d(inp),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    ]
    pointwise = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.LeakyReLU(negative_slope=leaky, inplace=True),
    ]
    return nn.Sequential(*(depthwise + pointwise))
class SSH(nn.Module):
    """SSH context module: concatenates 3x3, 5x5 (two stacked 3x3) and
    7x7 (three stacked 3x3) branches, then applies ReLU.

    Output channels split as out/2 + out/4 + out/4, hence the divisibility check.
    """

    def __init__(self, in_channel, out_channel):
        super(SSH, self).__init__()
        assert out_channel % 4 == 0
        leaky = 0.1 if out_channel <= 64 else 0
        self.conv3X3 = conv_bn_no_relu(in_channel, out_channel // 2, stride=1)
        self.conv5X5_1 = conv_bn(in_channel, out_channel // 4, stride=1, leaky=leaky)
        self.conv5X5_2 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)
        self.conv7X7_2 = conv_bn(out_channel // 4, out_channel // 4, stride=1, leaky=leaky)
        self.conv7x7_3 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)

    def forward(self, input):
        branch3 = self.conv3X3(input)
        # the first 3x3 conv is shared by the 5x5 and 7x7 branches
        shared = self.conv5X5_1(input)
        branch5 = self.conv5X5_2(shared)
        branch7 = self.conv7x7_3(self.conv7X7_2(shared))
        return F.relu(torch.cat([branch3, branch5, branch7], dim=1))
class FPN(nn.Module):
    """Feature pyramid: projects three backbone maps to a common channel
    count, then merges them top-down with nearest-neighbour upsampling."""

    def __init__(self, in_channels_list, out_channels):
        super(FPN, self).__init__()
        leaky = 0.1 if out_channels <= 64 else 0
        self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride=1, leaky=leaky)
        self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride=1, leaky=leaky)
        self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride=1, leaky=leaky)
        self.merge1 = conv_bn(out_channels, out_channels, leaky=leaky)
        self.merge2 = conv_bn(out_channels, out_channels, leaky=leaky)

    def forward(self, input):
        # input is a mapping of level -> feature map, ordered fine to coarse
        feats = list(input.values())
        lateral1 = self.output1(feats[0])
        lateral2 = self.output2(feats[1])
        lateral3 = self.output3(feats[2])
        # fold level 3 into level 2, then the merged level 2 into level 1
        up3 = F.interpolate(lateral3, size=[lateral2.size(2), lateral2.size(3)], mode="nearest")
        lateral2 = self.merge2(lateral2 + up3)
        up2 = F.interpolate(lateral2, size=[lateral1.size(2), lateral1.size(3)], mode="nearest")
        lateral1 = self.merge1(lateral1 + up2)
        return [lateral1, lateral2, lateral3]
class MobileNetV1(nn.Module):
    """Width-reduced MobileNetV1 backbone (8..256 channels over three stages).

    The trailing numeric comments come from the original author; they
    presumably track receptive-field growth per layer -- TODO confirm.
    """

    def __init__(self):
        super(MobileNetV1, self).__init__()
        # Stage 1: overall stride 8, 64 output channels.
        self.stage1 = nn.Sequential(
            conv_bn(3, 8, 2, leaky=0.1),  # 3
            conv_dw(8, 16, 1),  # 7
            conv_dw(16, 32, 2),  # 11
            conv_dw(32, 32, 1),  # 19
            conv_dw(32, 64, 2),  # 27
            conv_dw(64, 64, 1),  # 43
        )
        # Stage 2: overall stride 16, 128 output channels.
        self.stage2 = nn.Sequential(
            conv_dw(64, 128, 2),  # 43 + 16 = 59
            conv_dw(128, 128, 1),  # 59 + 32 = 91
            conv_dw(128, 128, 1),  # 91 + 32 = 123
            conv_dw(128, 128, 1),  # 123 + 32 = 155
            conv_dw(128, 128, 1),  # 155 + 32 = 187
            conv_dw(128, 128, 1),  # 187 + 32 = 219
        )
        # Stage 3: overall stride 32, 256 output channels.
        self.stage3 = nn.Sequential(
            conv_dw(128, 256, 2),  # 219 +3 2 = 241
            conv_dw(256, 256, 1),  # 241 + 64 = 301
        )
        self.avg = nn.AdaptiveAvgPool2d((1, 1))
        # Classification head; RetinaFace taps the stages directly and never
        # runs forward(), so avg/fc are unused there.
        self.fc = nn.Linear(256, 1000)

    def forward(self, x):
        x = self.stage1(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.avg(x)
        # x = self.model(x)
        x = x.view(-1, 256)
        x = self.fc(x)
        return x
class ClassHead(nn.Module):
    """Predicts per-anchor 2-class (background/face) scores from one feature map."""

    def __init__(self, inchannels=512, num_anchors=3):
        super(ClassHead, self).__init__()
        self.num_anchors = num_anchors
        self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        # (N, A*2, H, W) -> (N, H*W*A, 2)
        scores = self.conv1x1(x).permute(0, 2, 3, 1).contiguous()
        return scores.view(scores.shape[0], -1, 2)
class BboxHead(nn.Module):
    """Predicts per-anchor 4-value box regressions from one feature map."""

    def __init__(self, inchannels=512, num_anchors=3):
        super(BboxHead, self).__init__()
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        # (N, A*4, H, W) -> (N, H*W*A, 4)
        offsets = self.conv1x1(x).permute(0, 2, 3, 1).contiguous()
        return offsets.view(offsets.shape[0], -1, 4)
class LandmarkHead(nn.Module):
    """Predicts per-anchor 10-value (5-point) landmark regressions."""

    def __init__(self, inchannels=512, num_anchors=3):
        super(LandmarkHead, self).__init__()
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=(1, 1), stride=1, padding=0)

    def forward(self, x):
        # (N, A*10, H, W) -> (N, H*W*A, 10)
        points = self.conv1x1(x).permute(0, 2, 3, 1).contiguous()
        return points.view(points.shape[0], -1, 10)
class RetinaFace(nn.Module):
    """RetinaFace detection network on a MobileNetV1 backbone.

    Produces, per anchor: a 4-value box regression, a 2-class score
    (background/face) and a 10-value five-point landmark regression.
    """

    def __init__(self, phase="test"):
        """
        :param phase: train or test.
        """
        super(RetinaFace, self).__init__()
        self.phase = phase
        backbone = MobileNetV1()
        # Tap the three backbone stages as FPN inputs.
        self.body = utils.IntermediateLayerGetter(backbone, {"stage1": 1, "stage2": 2, "stage3": 3})
        in_channels_stage2 = 32
        # channel counts of stage1/stage2/stage3: 64, 128, 256
        in_channels_list = [
            in_channels_stage2 * 2,
            in_channels_stage2 * 4,
            in_channels_stage2 * 8,
        ]
        out_channels = 64
        self.fpn = FPN(in_channels_list, out_channels)
        # one SSH context module per pyramid level
        self.ssh1 = SSH(out_channels, out_channels)
        self.ssh2 = SSH(out_channels, out_channels)
        self.ssh3 = SSH(out_channels, out_channels)
        self.ClassHead = self._make_class_head(fpn_num=3, inchannels=out_channels)
        self.BboxHead = self._make_bbox_head(fpn_num=3, inchannels=out_channels)
        self.LandmarkHead = self._make_landmark_head(fpn_num=3, inchannels=out_channels)

    def _make_class_head(self, fpn_num=3, inchannels=64, anchor_num=2):
        # One classification head per FPN level.
        classhead = nn.ModuleList()
        for i in range(fpn_num):
            classhead.append(ClassHead(inchannels, anchor_num))
        return classhead

    def _make_bbox_head(self, fpn_num=3, inchannels=64, anchor_num=2):
        # One box-regression head per FPN level.
        bboxhead = nn.ModuleList()
        for i in range(fpn_num):
            bboxhead.append(BboxHead(inchannels, anchor_num))
        return bboxhead

    def _make_landmark_head(self, fpn_num=3, inchannels=64, anchor_num=2):
        # One landmark-regression head per FPN level.
        landmarkhead = nn.ModuleList()
        for i in range(fpn_num):
            landmarkhead.append(LandmarkHead(inchannels, anchor_num))
        return landmarkhead

    def forward(self, inputs):
        """Return (bbox, class, landmark) tensors concatenated across levels.

        In "train" phase the class scores are raw logits; otherwise they are
        softmax probabilities.
        """
        out = self.body(inputs)
        # top-down merge of the three backbone stages
        fpn = self.fpn(out)
        # per-level context enrichment
        feature1 = self.ssh1(fpn[0])
        feature2 = self.ssh2(fpn[1])
        feature3 = self.ssh3(fpn[2])
        features = [feature1, feature2, feature3]
        # concatenate per-level head outputs along the anchor dimension
        bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1)
        classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)], dim=1)
        ldm_regressions = torch.cat([self.LandmarkHead[i](feature) for i, feature in enumerate(features)], dim=1)
        if self.phase == "train":
            output = (bbox_regressions, classifications, ldm_regressions)
        else:
            output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)
        return output
def get_backbone():
    """Create the RetinaFace model and load its pretrained weights.

    :return: RetinaFace module switched to eval mode.
    """
    cdir = os.path.dirname(__file__)
    # os.path.join already yields "models/retina_face.pth" when cdir == ""
    # (module run from its own directory), so no special-casing is needed.
    checkpoint = os.path.join(cdir, "models", "retina_face.pth")
    model = RetinaFace()
    # map_location lets a GPU-saved checkpoint load on CPU-only machines;
    # the caller moves the model to its target device afterwards.
    model.load_state_dict(torch.load(checkpoint, map_location="cpu"))
    return model.eval()
class Detector(object):
    """Mobile face detector: RetinaFace backbone plus pre/post-processing."""

    def __init__(self, device=torch.device("cuda")):
        self.device = device
        self.backbone = get_backbone().to(device)
        # mean subtraction only (std of 1.0); combined with the *255 scaling
        # below this reproduces the network's expected input range
        self.transform = T.Normalize([0.485, 0.456, 0.406], [1.0, 1.0, 1.0])

    def __call__(self, input_tensor):
        """Detect faces in a (1, 3, H, W) float tensor in [0, 1].

        NOTE(review): the normalisation loop below writes into the caller's
        ``input_tensor`` in place -- confirm that is intended.

        :return: (has_face, dets, landms) where dets is an (N, 5) array of
                 (x1, y1, x2, y2, score) and landms is (N, 10) laid out as
                 five x-coordinates followed by five y-coordinates.
        """
        for i in range(input_tensor.size(0)):
            input_tensor[i] = self.transform(input_tensor[i])
        input_tensor = input_tensor * 255.0
        with torch.no_grad():
            loc, conf, landms = self.backbone(input_tensor)
        # hyper parameters for NMS
        confidence_threshold = 0.9
        top_k = 5000
        nms_threshold = 0.4
        keep_top_k = 750
        H, W = input_tensor.size(2), input_tensor.size(3)
        # scale factors from normalised coordinates back to pixels
        scale = torch.Tensor([W, H, W, H]).to(self.device)
        # anchor grid matching the input resolution
        priorbox = PriorBox(H=H, W=W)
        priors = priorbox.forward()
        priors = priors.to(self.device)
        prior_data = priors.data
        # decode box regressions into absolute pixel corner coordinates
        boxes = decode(loc.data.squeeze(0), prior_data)
        boxes = boxes * scale
        boxes = boxes.cpu().numpy()
        # column 1 holds the face-class probability
        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
        landms = decode_landm(landms.data.squeeze(0), prior_data)
        scale1 = torch.Tensor(
            [
                W,
                H,
                W,
                H,
                W,
                H,
                W,
                H,
                W,
                H,
            ]
        ).to(self.device)
        landms = landms * scale1
        landms = landms.cpu().numpy()
        # ignore low scores
        inds = np.where(scores > confidence_threshold)[0]
        boxes = boxes[inds]
        landms = landms[inds]
        scores = scores[inds]
        # keep top-K before NMS
        order = scores.argsort()[::-1][:top_k]
        boxes = boxes[order]
        landms = landms[order]
        scores = scores[order]
        # do NMS
        dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
        keep = py_cpu_nms(dets, nms_threshold)
        dets = dets[keep, :]
        landms = landms[keep]
        # keep top-K faster NMS
        dets = dets[:keep_top_k, :]
        landms = landms[:keep_top_k, :]
        # reorder each row from (x1,y1,...,x5,y5) to (x1..x5, y1..y5)
        landms = landms.reshape(-1, 5, 2)
        landms = landms.transpose(0, 2, 1)
        landms = landms.reshape(-1, 10)
        return len(dets) > 0, dets, landms
if __name__ == "__main__":
    import sys
    from PIL import Image

    # Fall back to CPU when CUDA is unavailable so the demo still runs.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = Detector(device)

    image = Image.open(sys.argv[1]).convert("RGB")
    input_tensor = T.ToTensor()(image).to(model.device).unsqueeze(0)

    hasface, dets, landms = model(input_tensor)
    print(model.backbone)
    print("input_tensor: ", input_tensor.size())
    print("detect: ", hasface)
    print("bboxes: ", dets)
    print("landms: ", landms)
    # BUG FIX: the original called an undefined draw(image, bboxes, landms)
    # here (both `draw` and `bboxes` do not exist), which raised NameError
    # whenever a face was found; report the detection count instead.
    if hasface:
        print("faces found: ", len(dets))
    # image.show()
    # align(image, landms)
| 32.677485 | 113 | 0.557666 |
ace65a807ff6f788532c7bf450f747c714e7ba38 | 2,321 | py | Python | awsprofile/mac.py | inceenes10/awsprofile | eb1672598650e409908bf583b623b034f2cf135c | [
"MIT"
] | null | null | null | awsprofile/mac.py | inceenes10/awsprofile | eb1672598650e409908bf583b623b034f2cf135c | [
"MIT"
] | null | null | null | awsprofile/mac.py | inceenes10/awsprofile | eb1672598650e409908bf583b623b034f2cf135c | [
"MIT"
] | null | null | null | import sys
import os
import re
from simple_term_menu import TerminalMenu
import rich
home_directory = os.path.expanduser("~")
def get_aws_profiles():
    """Return the profile names declared in ~/.aws/credentials.

    Profile headers look like ``[name]``; spaces are stripped before matching,
    so ``[ name ]`` is accepted as well.

    :return: list of profile name strings, in file order
    """
    aws_credential_file_path = os.path.join(home_directory, ".aws/credentials")
    aws_profiles = []
    # BUG FIX: the original pattern used [A-z], which also matches the ASCII
    # characters between 'Z' and 'a' ([, \, ], ^, _, `) and rejected hyphens
    # and underscores that commonly appear in profile names.
    aws_profile_name_regex = re.compile(r"^\[[A-Za-z0-9_-]+\]$")
    # use a context manager so the credentials file is closed promptly
    with open(aws_credential_file_path, "r") as credentials_file:
        lines = credentials_file.read().splitlines()
    lines = [line.replace(" ", "") for line in lines if line]
    for line in lines:
        aws_profile_name_matched = aws_profile_name_regex.search(line)
        if aws_profile_name_matched:
            # strip the surrounding brackets
            aws_profile_name = aws_profile_name_matched.group()[1:-1]
            aws_profiles.append(aws_profile_name)
    return aws_profiles
def main():
    """awsprofile CLI entry point.

    No arguments: interactively pick a profile and persist it as an
    ``export AWS_PROFILE=...`` line in ~/.zshrc.
    ``now``: print the profile currently exported in ~/.zshrc.
    Anything else: print an error message.
    """
    args = sys.argv[1:]
    # read zshrc file
    zshrc_path = os.path.join(home_directory, ".zshrc")
    zshrc_read = open(zshrc_path, "r")
    zshrc_lines = zshrc_read.readlines()
    zshrc_read.close()
    if len(args) == 0:
        aws_profiles = get_aws_profiles()
        # let the user pick a profile from a terminal menu
        profile_name = aws_profiles[TerminalMenu(aws_profiles).show()]
        changed_profile = False
        # replace an existing "export AWS_PROFILE=..." line in place
        # (spaces are stripped first, hence the "exportAWS_PROFILE=" needle)
        for i in range(len(zshrc_lines)):
            line = zshrc_lines[i].replace(" ", "")
            if "exportAWS_PROFILE=" in line:
                zshrc_lines[i] = "export AWS_PROFILE=" + profile_name + "\n"
                changed_profile = True
                break
        if changed_profile == False:
            zshrc_lines.append("export AWS_PROFILE=" + profile_name + "\n")
        # rewrite ~/.zshrc with the updated export line
        zshrc = open(zshrc_path, "w")
        zshrc.writelines(zshrc_lines)
        zshrc.close()
        rich.print("[bold green]SUCCESS: [white]Your AWS profile is changed [green]=> [white]" + profile_name, "[bold]")
        print()
        rich.print("[bold green]Open a new tab to load new AWS profile")
    elif args[0] == "now":
        # report the currently exported profile
        for i in range(len(zshrc_lines)):
            line = zshrc_lines[i].replace(" ", "").replace("\n", "")
            if "exportAWS_PROFILE=" in line:
                aws_profile = line.split("=")[1]
                rich.print("[bold]Your default AWS profile [bold green]=> [white]" + aws_profile)
    else:
        rich.print("[bold red]ERROR: It occured an error when running the program")
ace65a9450180896f0c34ecc7bf60b223e95aac4 | 4,027 | py | Python | desktop/core/ext-py/urllib3-1.25.8/test/conftest.py | yetsun/hue | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/urllib3-1.25.8/test/conftest.py | yetsun/hue | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/urllib3-1.25.8/test/conftest.py | yetsun/hue | 2e48f0cc70e233ee0e1b40733d4b2a18d8836c66 | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | import collections
import contextlib
import threading
import platform
import sys
import pytest
import trustme
from tornado import web, ioloop
from dummyserver.handlers import TestingApp
from dummyserver.server import run_tornado_app
from dummyserver.server import (
DEFAULT_CA,
DEFAULT_CA_KEY,
CLIENT_INTERMEDIATE_PEM,
CLIENT_NO_INTERMEDIATE_PEM,
CLIENT_INTERMEDIATE_KEY,
HAS_IPV6,
)
# The Python 3.8+ default loop on Windows breaks Tornado
@pytest.fixture(scope="session", autouse=True)
def configure_windows_event_loop():
    """Force the selector event loop on Windows for the whole test session."""
    if sys.version_info >= (3, 8) and platform.system() == "Windows":
        import asyncio

        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
@pytest.fixture(scope="session")
def certs_dir(tmp_path_factory):
    """Generate a client certificate chain (root CA -> intermediate -> leaf) on disk.

    Yields the temporary directory holding the leaf key, the leaf+intermediate
    chain PEM, and a leaf-only PEM.
    """
    tmpdir = tmp_path_factory.mktemp("certs")
    # Start from existing root CA as we don't want to change the server certificate yet
    with open(DEFAULT_CA, "rb") as crt, open(DEFAULT_CA_KEY, "rb") as key:
        root_ca = trustme.CA.from_pem(crt.read(), key.read())

    # client cert chain
    intermediate_ca = root_ca.create_child_ca()
    cert = intermediate_ca.issue_cert(u"example.com")

    cert.private_key_pem.write_to_path(str(tmpdir / CLIENT_INTERMEDIATE_KEY))
    # Write the client cert and the intermediate CA
    client_cert = str(tmpdir / CLIENT_INTERMEDIATE_PEM)
    cert.cert_chain_pems[0].write_to_path(client_cert)
    cert.cert_chain_pems[1].write_to_path(client_cert, append=True)
    # Write only the client cert
    cert.cert_chain_pems[0].write_to_path(str(tmpdir / CLIENT_NO_INTERMEDIATE_PEM))

    yield tmpdir
# Record describing a running test server: bind host, bound port, and the
# path of the CA certificate clients should trust.
ServerConfig = collections.namedtuple("ServerConfig", ["host", "port", "ca_certs"])
@contextlib.contextmanager
def run_server_in_thread(scheme, host, tmpdir, ca, server_cert):
    """Run a TLS Tornado test server in a background thread.

    Writes the CA cert and the server cert/key into *tmpdir*, starts the
    server, yields a ServerConfig, and shuts the server down on exit.
    """
    ca_cert_path = str(tmpdir / "ca.pem")
    server_cert_path = str(tmpdir / "server.pem")
    server_key_path = str(tmpdir / "server.key")
    ca.cert_pem.write_to_path(ca_cert_path)
    server_cert.private_key_pem.write_to_path(server_key_path)
    server_cert.cert_chain_pems[0].write_to_path(server_cert_path)
    server_certs = {"keyfile": server_key_path, "certfile": server_cert_path}

    io_loop = ioloop.IOLoop.current()
    app = web.Application([(r".*", TestingApp)])
    server, port = run_tornado_app(app, io_loop, server_certs, scheme, host)
    server_thread = threading.Thread(target=io_loop.start)
    server_thread.start()

    yield ServerConfig(host, port, ca_cert_path)

    # Stop the server and the loop from within the loop's own thread.
    io_loop.add_callback(server.stop)
    io_loop.add_callback(io_loop.stop)
    server_thread.join()
@pytest.fixture
def no_san_server(tmp_path_factory):
    """HTTPS server whose certificate carries only a common name (no SAN)."""
    cert_dir = tmp_path_factory.mktemp("certs")
    authority = trustme.CA()
    # only common name, no subject alternative names
    leaf = authority.issue_cert(common_name=u"localhost")

    with run_server_in_thread("https", "localhost", cert_dir, authority, leaf) as cfg:
        yield cfg
@pytest.fixture
def ip_san_server(tmp_path_factory):
    """HTTPS server whose certificate lists an IP address in its SAN."""
    cert_dir = tmp_path_factory.mktemp("certs")
    authority = trustme.CA()
    # IP address in Subject Alternative Name
    leaf = authority.issue_cert(u"127.0.0.1")

    with run_server_in_thread("https", "127.0.0.1", cert_dir, authority, leaf) as cfg:
        yield cfg
@pytest.fixture
def ipv6_addr_server(tmp_path_factory):
    """HTTPS server on ::1 whose certificate has the IPv6 address as common name."""
    if not HAS_IPV6:
        pytest.skip("Only runs on IPv6 systems")

    cert_dir = tmp_path_factory.mktemp("certs")
    authority = trustme.CA()
    # IP address in Common Name
    leaf = authority.issue_cert(common_name=u"::1")

    with run_server_in_thread("https", "::1", cert_dir, authority, leaf) as cfg:
        yield cfg
@pytest.fixture
def ipv6_san_server(tmp_path_factory):
    """HTTPS server on ::1 whose certificate lists the IPv6 address in its SAN."""
    if not HAS_IPV6:
        pytest.skip("Only runs on IPv6 systems")

    cert_dir = tmp_path_factory.mktemp("certs")
    authority = trustme.CA()
    # IP address in Subject Alternative Name
    leaf = authority.issue_cert(u"::1")

    with run_server_in_thread("https", "::1", cert_dir, authority, leaf) as cfg:
        yield cfg
| 31.460938 | 87 | 0.728582 |
ace65b0fc9c525e8a5d46393d217cff937665022 | 4,932 | py | Python | tests/test_tools.py | TUW-GEO/geospade | 28eee3c6bf93dc3d2ceb026e310e7fca023dd722 | [
"MIT"
] | 2 | 2021-10-20T09:26:34.000Z | 2021-11-04T08:42:32.000Z | tests/test_tools.py | TUW-GEO/geospade | 28eee3c6bf93dc3d2ceb026e310e7fca023dd722 | [
"MIT"
] | 15 | 2020-10-02T14:15:27.000Z | 2021-04-19T08:34:22.000Z | tests/test_tools.py | TUW-GEO/geospade | 28eee3c6bf93dc3d2ceb026e310e7fca023dd722 | [
"MIT"
] | 2 | 2019-10-25T14:17:48.000Z | 2021-03-14T09:05:16.000Z | """ Test suite for the tools module. """
import unittest
import numpy as np
from shapely.geometry import Polygon
import ogr
from geospade.tools import get_quadrant
from geospade.tools import rasterise_polygon
class ToolsTest(unittest.TestCase):
""" Tests all functions in the tools module. """
def test_get_quadrant(self):
    """ Tests all 5 cases of quadrants (1, 2, 3, 4, None). """
    # (x, y) -> expected quadrant; axis points map to None
    cases = [
        ((1, 1), 1),
        ((-1, 1), 2),
        ((-1, -1), 3),
        ((1, -1), 4),
        ((1, 0), None),
    ]
    for (x, y), expected in cases:
        assert get_quadrant(x, y) == expected
def test_rasterise_polygon(self):
""" Tests rasterisation of a polygon. """
ref_raster = np.array([[0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 0, 0]])
ref_raster = np.array(ref_raster)
poly_pts = [(1, 1), (1, 4), (5, 8), (6, 8), (6, 5), (8, 3), (6, 1), (1, 1)]
geom = Polygon(poly_pts)
raster = rasterise_polygon(geom, 1, 1)
assert np.all(raster == ref_raster)
ref_raster = np.array([[1, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1]])
ref_raster = np.array(ref_raster)
poly_pts = [(1, 1), (1, 7), (5, 3), (8, 6), (8, 1), (1, 1)]
geom = Polygon(poly_pts)
raster = rasterise_polygon(geom, 1, 1)
assert np.all(raster == ref_raster)
def test_rasterise_polygon_buffer(self):
""" Tests rasterisation of a polygon (with buffering). """
poly_pts = [(1, 1), (1, 4), (5, 8), (6, 8), (6, 5), (8, 3), (6, 1), (1, 1)]
geom = Polygon(poly_pts)
ref_raster = np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
ref_raster = np.array(ref_raster)
raster = rasterise_polygon(geom, 1, 1, buffer=-1)
assert np.all(raster == ref_raster)
ref_raster = np.array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 0, 0]])
ref_raster = np.array(ref_raster)
raster = rasterise_polygon(geom, 1, 1, buffer=-1, keep_shape=False)
assert np.all(raster == ref_raster)
ref_raster = np.array([[0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1]])
ref_raster = np.array(ref_raster)
raster = rasterise_polygon(geom, 1, 1, buffer=1)
assert np.all(raster == ref_raster)
ref_raster = np.array([[0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 0, 0]])
ref_raster = np.array(ref_raster)
raster = rasterise_polygon(geom, 1, 1, buffer=1, keep_shape=False)
assert np.all(raster == ref_raster)
# Allow running this test module directly with ``python test_tools.py``.
if __name__ == '__main__':
    unittest.main()
| 38.232558 | 83 | 0.355231 |
ace65b489d35382d189bf5946f0f3c1dfb507b03 | 21,049 | py | Python | sdk/formrecognizer/azure-ai-formrecognizer/tests/test_dac_analyze_prebuilts_from_url_async.py | moovy2/azure-sdk-for-python | 6b0495dc9917d47a7264f26cbd3221d43461a537 | [
"MIT"
] | 1 | 2021-09-07T18:39:05.000Z | 2021-09-07T18:39:05.000Z | sdk/formrecognizer/azure-ai-formrecognizer/tests/test_dac_analyze_prebuilts_from_url_async.py | moovy2/azure-sdk-for-python | 6b0495dc9917d47a7264f26cbd3221d43461a537 | [
"MIT"
] | null | null | null | sdk/formrecognizer/azure-ai-formrecognizer/tests/test_dac_analyze_prebuilts_from_url_async.py | moovy2/azure-sdk-for-python | 6b0495dc9917d47a7264f26cbd3221d43461a537 | [
"MIT"
] | null | null | null | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from datetime import date, time
from devtools_testutils.aio import recorded_by_proxy_async
from devtools_testutils import set_bodiless_matcher
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ServiceRequestError
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer.aio import DocumentAnalysisClient
from azure.ai.formrecognizer import AnalyzeResult
from azure.ai.formrecognizer._generated.v2021_09_30_preview.models import AnalyzeResultOperation
from preparers import FormRecognizerPreparer
from asynctestcase import AsyncFormRecognizerTest
from preparers import GlobalClientPreparer as _GlobalClientPreparer
DocumentAnalysisClientPreparer = functools.partial(_GlobalClientPreparer, DocumentAnalysisClient)
class TestDACAnalyzePrebuiltsfromUrlAsync(AsyncFormRecognizerTest):
    """Recorded tests for analyzing documents from URLs with prebuilt models.

    Several assertions below were repaired: a mechanical ``assertEqual`` ->
    ``assert`` conversion had replaced the last ``", "`` with ``" == "``,
    turning equality checks into message-form asserts that only tested
    truthiness (e.g. ``assert x.value, date(2017, 6 == 18)``).
    """

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_business_card_multipage_pdf(self, client):
        """Two business cards in one PDF produce two analyzed documents."""
        set_bodiless_matcher()
        async with client:
            poller = await client.begin_analyze_document_from_url("prebuilt-businessCard", self.business_card_multipage_url_pdf)
            result = await poller.result()

        assert len(result.documents) == 2
        business_card = result.documents[0]
        assert len(business_card.fields.get("ContactNames").value) == 1
        assert business_card.fields.get("ContactNames").value[0].value['FirstName'].value == 'JOHN'
        assert business_card.fields.get("ContactNames").value[0].value['LastName'].value == 'SINGER'
        assert len(business_card.fields.get("JobTitles").value) == 1
        assert business_card.fields.get("JobTitles").value[0].value == "Software Engineer"
        assert len(business_card.fields.get("Emails").value) == 1
        assert business_card.fields.get("Emails").value[0].value == "johnsinger@contoso.com"
        assert len(business_card.fields.get("Websites").value) == 1
        assert business_card.fields.get("Websites").value[0].value == "https://www.contoso.com"
        assert len(business_card.fields.get("OtherPhones").value) == 1
        assert business_card.fields.get("OtherPhones").value[0].value == "+14257793479"

        business_card = result.documents[1]
        assert len(business_card.fields.get("ContactNames").value) == 1
        assert business_card.fields.get("ContactNames").value[0].value['FirstName'].value == 'Avery'
        assert business_card.fields.get("ContactNames").value[0].value['LastName'].value == 'Smith'
        assert len(business_card.fields.get("JobTitles").value) == 1
        assert business_card.fields.get("JobTitles").value[0].value == "Senior Researcher"
        assert len(business_card.fields.get("Departments").value) == 1
        # Recorded OCR output reads "Al" (not "AI"); keep the recorded value.
        assert business_card.fields.get("Departments").value[0].value == "Cloud & Al Department"
        assert len(business_card.fields.get("Emails").value) == 1
        assert business_card.fields.get("Emails").value[0].value == "avery.smith@contoso.com"
        assert len(business_card.fields.get("Websites").value) == 1
        assert business_card.fields.get("Websites").value[0].value == "https://www.contoso.com/"
        # The phone number values are not getting normalized to a phone number type. Just assert on text.
        assert len(business_card.fields.get("MobilePhones").value) == 1
        assert business_card.fields.get("MobilePhones").value[0].content == "+44 (0) 7911 123456"
        assert len(business_card.fields.get("WorkPhones").value) == 1
        assert business_card.fields.get("WorkPhones").value[0].content == "+44 (0) 20 9876 5432"
        assert len(business_card.fields.get("Faxes").value) == 1
        assert business_card.fields.get("Faxes").value[0].content == "+44 (0) 20 6789 2345"
        assert len(business_card.fields.get("Addresses").value) == 1
        assert business_card.fields.get("Addresses").value[0].value == "2 Kingdom Street Paddington, London, W2 6BD"
        assert len(business_card.fields.get("CompanyNames").value) == 1
        assert business_card.fields.get("CompanyNames").value[0].value == "Contoso"

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_identity_document_jpg_passport(self, client):
        """Passport fields are extracted from the machine readable zone."""
        set_bodiless_matcher()
        async with client:
            poller = await client.begin_analyze_document_from_url("prebuilt-idDocument", self.identity_document_url_jpg_passport)
            result = await poller.result()

        assert len(result.documents) == 1
        id_document = result.documents[0]

        # check dict values
        passport = id_document.fields.get("MachineReadableZone").value
        assert passport["LastName"].value == "MARTIN"
        assert passport["FirstName"].value == "SARAH"
        assert passport["DocumentNumber"].value == "ZE000509"
        assert passport["DateOfBirth"].value == date(1985, 1, 1)
        assert passport["DateOfExpiration"].value == date(2023, 1, 14)
        assert passport["Sex"].value == "F"
        assert passport["CountryRegion"].value == "CAN"

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_identity_document_jpg(self, client):
        """Driver's license fields are extracted from a JPG."""
        set_bodiless_matcher()
        async with client:
            poller = await client.begin_analyze_document_from_url("prebuilt-idDocument", self.identity_document_url_jpg)
            result = await poller.result()

        assert len(result.documents) == 1
        id_document = result.documents[0]

        # check dict values
        assert id_document.fields.get("LastName").value == "TALBOT"
        assert id_document.fields.get("FirstName").value == "LIAM R."
        assert id_document.fields.get("DocumentNumber").value == "WDLABCD456DG"
        assert id_document.fields.get("DateOfBirth").value == date(1958, 1, 6)
        assert id_document.fields.get("DateOfExpiration").value == date(2020, 8, 12)
        assert id_document.fields.get("Sex").value == "M"
        assert id_document.fields.get("Address").value == "123 STREET ADDRESS YOUR CITY WA 99999-1234"
        assert id_document.fields.get("CountryRegion").value == "USA"
        assert id_document.fields.get("Region").value == "Washington"

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_invoice_tiff(self, client):
        """Invoice fields are extracted from a TIFF.

        Fixed: the address/date assertions were message-form asserts that
        only checked truthiness; restored the intended equality checks.
        """
        set_bodiless_matcher()
        async with client:
            poller = await client.begin_analyze_document_from_url(model="prebuilt-invoice", document_url=self.invoice_url_tiff)
            result = await poller.result()

        assert len(result.documents) == 1
        invoice = result.documents[0]

        # check dict values
        assert invoice.fields.get("VendorName").value == "Contoso"
        assert invoice.fields.get("VendorAddress").value == '1 Redmond way Suite 6000 Redmond, WA 99243'
        assert invoice.fields.get("CustomerAddressRecipient").value == "Microsoft"
        assert invoice.fields.get("CustomerAddress").value == '1020 Enterprise Way Sunnayvale, CA 87659'
        assert invoice.fields.get("CustomerName").value == "Microsoft"
        assert invoice.fields.get("InvoiceId").value == '34278587'
        assert invoice.fields.get("InvoiceDate").value == date(2017, 6, 18)
        assert invoice.fields.get("Items").value[0].value["Amount"].value == 56651.49
        assert invoice.fields.get("DueDate").value == date(2017, 6, 24)

    @FormRecognizerPreparer()
    @recorded_by_proxy_async
    async def test_polling_interval(self, formrecognizer_test_endpoint, formrecognizer_test_api_key, **kwargs):
        """A per-call polling_interval overrides, then reverts to, the client default."""
        set_bodiless_matcher()
        client = DocumentAnalysisClient(formrecognizer_test_endpoint, AzureKeyCredential(formrecognizer_test_api_key), polling_interval=7)
        assert client._client._config.polling_interval == 7

        async with client:
            poller = await client.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_jpg, polling_interval=6)
            await poller.wait()
            assert poller._polling_method._timeout == 6
            poller2 = await client.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_jpg)
            await poller2.wait()
            assert poller2._polling_method._timeout == 7  # goes back to client default

    @pytest.mark.live_test_only
    @FormRecognizerPreparer()
    async def test_active_directory_auth_async(self):
        """The client works with an AAD token credential (live only)."""
        token = self.generate_oauth_token()
        endpoint = self.get_oauth_endpoint()
        client = DocumentAnalysisClient(endpoint, token)
        async with client:
            poller = await client.begin_analyze_document_from_url(
                "prebuilt-receipt",
                self.receipt_url_jpg
            )
            result = await poller.result()
        assert result is not None

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_receipts_encoded_url(self, client):
        """Percent-encoded characters in the URL are passed through unescaped."""
        set_bodiless_matcher()
        with pytest.raises(HttpResponseError) as e:
            async with client:
                poller = await client.begin_analyze_document_from_url("prebuilt-receipt", "https://fakeuri.com/blank%20space")
        assert "https://fakeuri.com/blank%20space" in e.value.response.request.body

    @pytest.mark.skip()
    @FormRecognizerPreparer()
    @recorded_by_proxy_async
    async def test_receipt_url_bad_endpoint(self, formrecognizer_test_endpoint, formrecognizer_test_api_key, **kwargs):
        """An unreachable endpoint raises ServiceRequestError."""
        set_bodiless_matcher()
        with pytest.raises(ServiceRequestError):
            client = DocumentAnalysisClient("http://notreal.azure.com", AzureKeyCredential(formrecognizer_test_api_key))
            async with client:
                poller = await client.begin_analyze_document_from_url(
                    "prebuilt-receipt",
                    self.receipt_url_jpg
                )
                result = await poller.result()

    @FormRecognizerPreparer()
    @recorded_by_proxy_async
    async def test_receipt_url_auth_bad_key(self, formrecognizer_test_endpoint, formrecognizer_test_api_key, **kwargs):
        """A bad API key raises ClientAuthenticationError."""
        set_bodiless_matcher()
        client = DocumentAnalysisClient(formrecognizer_test_endpoint, AzureKeyCredential("xxxx"))
        with pytest.raises(ClientAuthenticationError):
            async with client:
                poller = await client.begin_analyze_document_from_url(
                    "prebuilt-receipt",
                    self.receipt_url_jpg
                )
                result = await poller.result()

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_receipt_bad_url(self, client):
        """An unresolvable document URL raises HttpResponseError."""
        set_bodiless_matcher()
        with pytest.raises(HttpResponseError):
            async with client:
                poller = await client.begin_analyze_document_from_url("prebuilt-receipt", "https://badurl.jpg")
                result = await poller.result()

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_receipt_url_pass_stream(self, client):
        """Passing bytes to the *_from_url method raises HttpResponseError."""
        set_bodiless_matcher()
        with open(self.receipt_png, "rb") as fd:
            receipt = fd.read(4)  # makes the recording smaller

        with pytest.raises(HttpResponseError):
            async with client:
                poller = await client.begin_analyze_document_from_url("prebuilt-receipt", receipt)
                result = await poller.result()

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_receipt_url_transform_jpg(self, client):
        """The deserialized AnalyzeResult matches the raw service response."""
        set_bodiless_matcher()
        responses = []

        def callback(raw_response, _, headers):
            analyze_result = client._deserialize(AnalyzeResultOperation, raw_response)
            extracted_receipt = AnalyzeResult._from_generated(analyze_result.analyze_result)
            responses.append(analyze_result)
            responses.append(extracted_receipt)

        async with client:
            poller = await client.begin_analyze_document_from_url(
                "prebuilt-receipt",
                self.receipt_url_jpg,
                cls=callback
            )
            result = await poller.result()

        raw_analyze_result = responses[0].analyze_result
        returned_model = responses[1]

        # Check AnalyzeResult
        assert returned_model.model_id == raw_analyze_result.model_id
        assert returned_model.api_version == raw_analyze_result.api_version
        assert returned_model.content == raw_analyze_result.content

        self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)
        self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)
        self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)
        self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)
        self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)
        self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)

        # check page range
        assert len(raw_analyze_result.pages) == len(returned_model.pages)

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_receipt_url_png(self, client):
        """Receipt fields are extracted from a PNG.

        Fixed: the MerchantAddress assertion was a message-form assert that
        only checked truthiness; restored the intended equality check.
        """
        set_bodiless_matcher()
        async with client:
            poller = await client.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_png)
            result = await poller.result()

        assert len(result.documents) == 1
        receipt = result.documents[0]
        assert receipt.fields.get("MerchantAddress").value == '123 Main Street Redmond, WA 98052'
        assert receipt.fields.get("MerchantName").value == 'Contoso'
        assert receipt.fields.get("Subtotal").value == 1098.99
        assert receipt.fields.get("Tax").value == 104.4
        assert receipt.fields.get("Total").value == 1203.39
        assert receipt.fields.get("TransactionDate").value == date(year=2019, month=6, day=10)
        assert receipt.fields.get("TransactionTime").value == time(hour=13, minute=59, second=0)
        receipt_type = receipt.fields.get("ReceiptType")
        assert receipt_type.confidence is not None
        assert receipt_type.value == 'Itemized'
        assert len(result.pages) == 1

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_receipt_multipage_url(self, client):
        """Two receipts in one PDF produce two analyzed documents.

        Fixed: both MerchantAddress assertions were message-form asserts
        that only checked truthiness; restored the equality checks.
        """
        set_bodiless_matcher()
        async with client:
            poller = await client.begin_analyze_document_from_url("prebuilt-receipt", self.multipage_receipt_url_pdf)
            result = await poller.result()

        assert len(result.documents) == 2
        receipt = result.documents[0]
        assert receipt.fields.get("MerchantAddress").value == '123 Main Street Redmond, WA 98052'
        assert receipt.fields.get("MerchantName").value == 'Contoso'
        assert receipt.fields.get("MerchantPhoneNumber").value == '+19876543210'
        assert receipt.fields.get("Subtotal").value == 11.7
        assert receipt.fields.get("Tax").value == 1.17
        assert receipt.fields.get("Tip").value == 1.63
        assert receipt.fields.get("Total").value == 14.5
        assert receipt.fields.get("TransactionDate").value == date(year=2019, month=6, day=10)
        assert receipt.fields.get("TransactionTime").value == time(hour=13, minute=59, second=0)
        receipt_type = receipt.fields.get("ReceiptType")
        assert receipt_type.confidence is not None
        assert receipt_type.value == 'Itemized'

        receipt = result.documents[1]
        assert receipt.fields.get("MerchantAddress").value == '123 Main Street Redmond, WA 98052'
        assert receipt.fields.get("MerchantName").value == 'Contoso'
        assert receipt.fields.get("Subtotal").value == 1098.99
        assert receipt.fields.get("Tax").value == 104.4
        assert receipt.fields.get("Total").value == 1203.39
        assert receipt.fields.get("TransactionDate").value == date(year=2019, month=6, day=10)
        assert receipt.fields.get("TransactionTime").value == time(hour=13, minute=59, second=0)
        receipt_type = receipt.fields.get("ReceiptType")
        assert receipt_type.confidence is not None
        assert receipt_type.value == 'Itemized'
        assert len(result.pages) == 2

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_receipt_multipage_transform_url(self, client):
        """Multipage transform: deserialized result matches the raw response."""
        set_bodiless_matcher()
        responses = []

        def callback(raw_response, _, headers):
            analyze_result = client._deserialize(AnalyzeResultOperation, raw_response)
            extracted_receipt = AnalyzeResult._from_generated(analyze_result.analyze_result)
            responses.append(analyze_result)
            responses.append(extracted_receipt)

        async with client:
            poller = await client.begin_analyze_document_from_url(
                "prebuilt-receipt",
                self.multipage_receipt_url_pdf,
                cls=callback
            )
            result = await poller.result()

        raw_analyze_result = responses[0].analyze_result
        returned_model = responses[1]

        # Check AnalyzeResult
        assert returned_model.model_id == raw_analyze_result.model_id
        assert returned_model.api_version == raw_analyze_result.api_version
        assert returned_model.content == raw_analyze_result.content

        self.assertDocumentPagesTransformCorrect(returned_model.pages, raw_analyze_result.pages)
        self.assertDocumentTransformCorrect(returned_model.documents, raw_analyze_result.documents)
        self.assertDocumentTablesTransformCorrect(returned_model.tables, raw_analyze_result.tables)
        self.assertDocumentKeyValuePairsTransformCorrect(returned_model.key_value_pairs, raw_analyze_result.key_value_pairs)
        self.assertDocumentEntitiesTransformCorrect(returned_model.entities, raw_analyze_result.entities)
        self.assertDocumentStylesTransformCorrect(returned_model.styles, raw_analyze_result.styles)

        # check page range
        assert len(raw_analyze_result.pages) == len(returned_model.pages)

    @pytest.mark.live_test_only
    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    async def test_receipt_continuation_token(self, client):
        """A poller can be resumed from a continuation token (live only)."""
        async with client:
            initial_poller = await client.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_jpg)
            cont_token = initial_poller.continuation_token()
            poller = await client.begin_analyze_document_from_url("prebuilt-receipt", None, continuation_token=cont_token)
            result = await poller.result()
            assert result is not None
            await initial_poller.wait()  # necessary so azure-devtools doesn't throw assertion error

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_receipt_locale_specified(self, client):
        """The ``locale`` keyword is forwarded as a query parameter."""
        set_bodiless_matcher()
        async with client:
            poller = await client.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_jpg, locale="en-IN")
            assert 'en-IN' == poller._polling_method._initial_response.http_response.request.query['locale']
            result = await poller.result()
            assert result

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_receipt_locale_error(self, client):
        """An unsupported locale is rejected with InvalidArgument."""
        set_bodiless_matcher()
        with pytest.raises(HttpResponseError) as e:
            async with client:
                await client.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_jpg, locale="not a locale")
        assert "InvalidArgument" == e.value.error.code

    @FormRecognizerPreparer()
    @DocumentAnalysisClientPreparer()
    @recorded_by_proxy_async
    async def test_pages_kwarg_specified(self, client):
        """The ``pages`` keyword is forwarded as a query parameter."""
        set_bodiless_matcher()
        async with client:
            poller = await client.begin_analyze_document_from_url("prebuilt-receipt", self.receipt_url_jpg, pages="1")
            assert '1' == poller._polling_method._initial_response.http_response.request.query['pages']
            result = await poller.result()
            assert result
| 48.5 | 138 | 0.693715 |
ace65bd91b931a634f327b6db54d44c21e3da692 | 25,056 | py | Python | test/test_loader.py | BlackWidowMovie0/sopel | ab040a5d177855874bea0b975be169d6efbe2ee3 | [
"EFL-2.0"
] | 1 | 2021-03-17T16:55:25.000Z | 2021-03-17T16:55:25.000Z | test/test_loader.py | BlackWidowMovie0/sopel | ab040a5d177855874bea0b975be169d6efbe2ee3 | [
"EFL-2.0"
] | null | null | null | test/test_loader.py | BlackWidowMovie0/sopel | ab040a5d177855874bea0b975be169d6efbe2ee3 | [
"EFL-2.0"
] | null | null | null | # coding=utf-8
"""Tests for the ``sopel.loader`` module."""
from __future__ import absolute_import, division, print_function, unicode_literals
import inspect
import re
import pytest
from sopel import loader, module, plugins
MOCK_MODULE_CONTENT = """# coding=utf-8
import re
import sopel.module
import sopel.plugin
@sopel.module.commands("first")
def first_command(bot, trigger):
pass
@sopel.module.commands("second")
def second_command(bot, trigger):
pass
@sopel.module.interval(5)
def interval5s(bot):
pass
@sopel.module.interval(10)
def interval10s(bot):
pass
@sopel.module.url(r'.\\.example\\.com')
def example_url(bot, trigger, match=None):
pass
def loader(settings):
return [re.compile(r'.+\\.example\\.com')]
@sopel.plugin.url_lazy(loader)
def example_url_lazy(bot, trigger):
pass
@sopel.module.event('TOPIC')
def on_topic_command(bot):
pass
def shutdown():
pass
def ignored():
pass
@sopel.module.rate(10)
def ignored_rate():
pass
class Ignored:
def __init__(self):
self.rule = [r'.*']
def __call__(self, bot, trigger):
pass
ignored_obj = Ignored()
def ignored_trickster():
pass
ignored_trickster._sopel_callable = True
"""
@pytest.fixture
def func():
    """Pytest fixture to get a function that will return True all the time"""
    # A fresh function object per test: the loader tests below mutate
    # attributes on it via ``loader.clean_callable``.
    def bot_command():
        """Test callable defined as a pytest fixture."""
        return True
    return bot_command
# Minimal Sopel core configuration consumed by the ``tmpconfig`` fixture.
TMP_CONFIG = """
[core]
owner = testnick
nick = TestBot
"""
@pytest.fixture
def tmpconfig(configfactory):
    """Build a settings object from ``TMP_CONFIG`` for the loader tests."""
    return configfactory('conf.ini', TMP_CONFIG)
@pytest.fixture
def testplugin(tmpdir):
    """Write ``MOCK_MODULE_CONTENT`` to a temp file and wrap it as a plugin."""
    root = tmpdir.mkdir('loader_mods')
    mod_file = root.join('file_mod.py')
    mod_file.write(MOCK_MODULE_CONTENT)
    return plugins.handlers.PyFilePlugin(mod_file.strpath)
def test_is_limitable(testplugin):
    """Test is_limitable behavior before clean_module is called."""
    testplugin.load()
    mod = testplugin._module

    # (callable, expected is_limitable) pairs, in the original assertion order
    expectations = [
        (mod.first_command, True),
        (mod.second_command, True),
        (mod.on_topic_command, True),
        (mod.interval5s, False),
        (mod.interval10s, False),
        (mod.shutdown, False),
        (mod.example_url, True),
        (mod.example_url_lazy, True),
    ]
    for candidate, expected in expectations:
        if expected:
            assert loader.is_limitable(candidate)
        else:
            assert not loader.is_limitable(candidate)
def test_is_triggerable(testplugin):
    """Test is_triggerable behavior before clean_module is called."""
    testplugin.load()
    mod = testplugin._module

    # (callable, expected is_triggerable) pairs, in the original assertion order
    expectations = [
        (mod.first_command, True),
        (mod.second_command, True),
        (mod.on_topic_command, True),
        (mod.interval5s, False),
        (mod.interval10s, False),
        (mod.shutdown, False),
        (mod.example_url, False),
        (mod.example_url_lazy, False),
    ]
    for candidate, expected in expectations:
        if expected:
            assert loader.is_triggerable(candidate)
        else:
            assert not loader.is_triggerable(candidate)
def test_is_url_callback(testplugin):
    """Test is_url_callback behavior before clean_module is called."""
    testplugin.load()
    mod = testplugin._module

    # Only the URL-decorated callables qualify as URL callbacks.
    not_url_callbacks = (
        mod.first_command,
        mod.second_command,
        mod.on_topic_command,
        mod.interval5s,
        mod.interval10s,
        mod.shutdown,
    )
    for candidate in not_url_callbacks:
        assert not loader.is_url_callback(candidate)

    assert loader.is_url_callback(mod.example_url)
    assert loader.is_url_callback(mod.example_url_lazy)
def test_clean_module(testplugin, tmpconfig):
    """clean_module must sort the plugin's callables into the right buckets."""
    testplugin.load()
    test_mod = testplugin._module

    callables, jobs, shutdowns, urls = loader.clean_module(
        test_mod, tmpconfig)

    assert len(callables) == 3
    assert test_mod.first_command in callables
    assert test_mod.second_command in callables
    assert test_mod.on_topic_command in callables
    assert len(jobs) == 2
    assert test_mod.interval5s in jobs
    assert test_mod.interval10s in jobs
    # Fixed: was ``assert len(shutdowns)`` (truthiness only); assert the
    # exact count, consistent with test_clean_module_idempotency below.
    assert len(shutdowns) == 1
    assert test_mod.shutdown in shutdowns
    assert len(urls) == 2
    assert test_mod.example_url in urls
    assert test_mod.example_url_lazy in urls

    # assert is_triggerable behavior *after* clean_module has been called
    assert loader.is_triggerable(test_mod.first_command)
    assert loader.is_triggerable(test_mod.second_command)
    assert loader.is_triggerable(test_mod.on_topic_command)
    assert not loader.is_triggerable(test_mod.interval5s)
    assert not loader.is_triggerable(test_mod.interval10s)
    assert not loader.is_triggerable(test_mod.shutdown)
    assert not loader.is_triggerable(test_mod.example_url)
    assert not loader.is_triggerable(test_mod.example_url_lazy)

    # ignored function is ignored
    assert test_mod.ignored not in callables
    assert test_mod.ignored not in jobs
    assert test_mod.ignored not in shutdowns
    assert test_mod.ignored not in urls

    # @rate doesn't create a callable and is ignored
    assert test_mod.ignored_rate not in callables
    assert test_mod.ignored_rate not in jobs
    assert test_mod.ignored_rate not in shutdowns
    assert test_mod.ignored_rate not in urls

    # object with a triggerable attribute are ignored by default
    assert loader.is_triggerable(test_mod.ignored_obj)
    assert test_mod.ignored_obj not in callables
    assert test_mod.ignored_obj not in jobs
    assert test_mod.ignored_obj not in shutdowns
    assert test_mod.ignored_obj not in urls

    # trickster function is ignored: it's still not a proper plugin callable
    assert not loader.is_triggerable(test_mod.ignored_trickster)
    assert test_mod.ignored_trickster not in callables
    assert test_mod.ignored_trickster not in jobs
    assert test_mod.ignored_trickster not in shutdowns
    assert test_mod.ignored_trickster not in urls
def test_clean_module_idempotency(testplugin, tmpconfig):
    """Calling clean_module twice must yield exactly the same buckets."""
    testplugin.load()
    test_mod = testplugin._module

    callables, jobs, shutdowns, urls = loader.clean_module(
        test_mod, tmpconfig)

    # sanity assertions: check test_clean_module if any of these fails
    assert len(callables) == 3
    assert len(jobs) == 2
    assert len(shutdowns) == 1
    assert len(urls) == 2

    # recall clean_module, we should have the same result
    new_callables, new_jobs, new_shutdowns, new_urls = loader.clean_module(
        test_mod, tmpconfig)

    assert new_callables == callables
    assert new_jobs == jobs
    assert new_shutdowns == shutdowns
    assert new_urls == urls

    # assert is_triggerable behavior
    assert loader.is_triggerable(test_mod.first_command)
    assert loader.is_triggerable(test_mod.second_command)
    assert loader.is_triggerable(test_mod.on_topic_command)
    assert not loader.is_triggerable(test_mod.interval5s)
    assert not loader.is_triggerable(test_mod.interval10s)
    assert not loader.is_triggerable(test_mod.shutdown)
    assert not loader.is_triggerable(test_mod.example_url)
def test_clean_callable_default(tmpconfig, func):
    """With no plugin attributes set, only ``thread`` gets a default value."""
    loader.clean_callable(func, tmpconfig)

    # The sole default applied to a bare callable
    assert getattr(func, 'thread', None) is True

    # None of the trigger-related attributes appear unless requested
    untouched_attributes = (
        'unblockable',
        'priority',
        'rate',
        'channel_rate',
        'global_rate',
        'event',
        'rule',
        'find_rules',
        'search_rules',
        'commands',
        'nickname_commands',
        'action_commands',
        'intents',
    )
    for attribute in untouched_attributes:
        assert not hasattr(func, attribute)
def test_clean_callable_command(tmpconfig, func):
    """A command callable gets the full set of default trigger metadata."""
    setattr(func, 'commands', ['test'])
    loader.clean_callable(func, tmpconfig)

    # Default values
    assert hasattr(func, 'unblockable')
    assert func.unblockable is False
    assert hasattr(func, 'priority')
    assert func.priority == 'medium'
    assert hasattr(func, 'thread')
    assert func.thread is True
    assert hasattr(func, 'rate')
    assert func.rate == 0
    assert hasattr(func, 'channel_rate')
    assert func.channel_rate == 0
    assert hasattr(func, 'global_rate')
    assert func.global_rate == 0
    # Commands are triggered by PRIVMSG only
    assert hasattr(func, 'event')
    assert func.event == ['PRIVMSG']

    # No rule is generated for a plain command
    assert not hasattr(func, 'rule')
def test_clean_callable_event(tmpconfig, func):
    """Event names are uppercased and defaults applied; cleaning is idempotent."""
    setattr(func, 'event', ['low', 'UP', 'MiXeD'])
    loader.clean_callable(func, tmpconfig)

    assert hasattr(func, 'event')
    assert func.event == ['LOW', 'UP', 'MIXED']

    # Default values
    assert hasattr(func, 'unblockable')
    assert func.unblockable is False
    assert hasattr(func, 'priority')
    assert func.priority == 'medium'
    assert hasattr(func, 'thread')
    assert func.thread is True
    assert hasattr(func, 'rate')
    assert func.rate == 0
    assert hasattr(func, 'channel_rate')
    assert func.channel_rate == 0
    assert hasattr(func, 'global_rate')
    assert func.global_rate == 0

    # idempotency
    loader.clean_callable(func, tmpconfig)
    assert func.event == ['LOW', 'UP', 'MIXED']
    assert func.unblockable is False
    assert func.priority == 'medium'
    assert func.thread is True
    assert func.rate == 0
    assert func.channel_rate == 0
    assert func.global_rate == 0
def test_clean_callable_event_string(tmpconfig, func):
    """A plain-string ``event`` is normalized to a single-item uppercase list."""
    func.event = 'some'
    loader.clean_callable(func, tmpconfig)

    assert hasattr(func, 'event')
    assert func.event == ['SOME']

    # A second cleaning pass must not change the result.
    loader.clean_callable(func, tmpconfig)
    assert func.event == ['SOME']
def test_clean_callable_rule(tmpconfig, func):
    """Rules stay as uncompiled pattern strings; defaults applied; idempotent."""
    setattr(func, 'rule', [r'abc'])
    loader.clean_callable(func, tmpconfig)

    assert hasattr(func, 'rule')
    assert len(func.rule) == 1

    # Test the regex is compiled properly
    regex = re.compile(func.rule[0])
    assert regex.match('abc')
    assert regex.match('abcd')
    assert not regex.match('efg')

    # Default values
    assert hasattr(func, 'unblockable')
    assert func.unblockable is False
    assert hasattr(func, 'priority')
    assert func.priority == 'medium'
    assert hasattr(func, 'thread')
    assert func.thread is True
    assert hasattr(func, 'rate')
    assert func.rate == 0
    assert hasattr(func, 'channel_rate')
    assert func.channel_rate == 0
    assert hasattr(func, 'global_rate')
    assert func.global_rate == 0

    # idempotency: the stored rule stays the original string, not the
    # compiled pattern object
    loader.clean_callable(func, tmpconfig)
    assert len(func.rule) == 1
    assert regex not in func.rule
    assert r'abc' in func.rule
    assert func.unblockable is False
    assert func.priority == 'medium'
    assert func.thread is True
    assert func.rate == 0
    assert func.channel_rate == 0
    assert func.global_rate == 0
def test_clean_callable_rule_string(tmpconfig, func):
setattr(func, 'rule', r'abc')
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'rule')
assert len(func.rule) == 1
# Test the regex is compiled properly
assert func.rule[0] == r'abc'
# idempotency
loader.clean_callable(func, tmpconfig)
assert len(func.rule) == 1
assert func.rule[0] == r'abc'
def test_clean_callable_rule_nick(tmpconfig, func):
"""Assert ``$nick`` in a rule is not replaced (deprecated feature)."""
setattr(func, 'rule', [r'$nickhello'])
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'rule')
assert len(func.rule) == 1
# Test the regex is not compiled
assert func.rule[0] == r'$nickhello'
# idempotency
loader.clean_callable(func, tmpconfig)
assert len(func.rule) == 1
assert func.rule[0] == r'$nickhello'
def test_clean_callable_rule_nickname(tmpconfig, func):
"""Assert ``$nickname`` in a rule is not replaced (deprecated feature)."""
setattr(func, 'rule', [r'$nickname\s+hello'])
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'rule')
assert len(func.rule) == 1
# Test the regex is not compiled
assert func.rule[0] == r'$nickname\s+hello'
# idempotency
loader.clean_callable(func, tmpconfig)
assert len(func.rule) == 1
assert func.rule[0] == r'$nickname\s+hello'
def test_clean_callable_find_rules(tmpconfig, func):
setattr(func, 'find_rules', [r'abc'])
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'find_rules')
assert len(func.find_rules) == 1
assert not hasattr(func, 'rule')
# Test the regex is compiled properly
regex = re.compile(func.find_rules[0])
assert regex.findall('abc')
assert regex.findall('abcd')
assert not regex.findall('adbc')
# Default values
assert hasattr(func, 'unblockable')
assert func.unblockable is False
assert hasattr(func, 'priority')
assert func.priority == 'medium'
assert hasattr(func, 'thread')
assert func.thread is True
assert hasattr(func, 'rate')
assert func.rate == 0
assert hasattr(func, 'channel_rate')
assert func.channel_rate == 0
assert hasattr(func, 'global_rate')
assert func.global_rate == 0
# idempotency
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'find_rules')
assert len(func.find_rules) == 1
assert regex not in func.find_rules
assert r'abc' in func.find_rules
assert not hasattr(func, 'rule')
assert func.unblockable is False
assert func.priority == 'medium'
assert func.thread is True
assert func.rate == 0
assert func.channel_rate == 0
assert func.global_rate == 0
def test_clean_callable_search_rules(tmpconfig, func):
setattr(func, 'search_rules', [r'abc'])
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'search_rules')
assert len(func.search_rules) == 1
assert not hasattr(func, 'rule')
# Test the regex is compiled properly
regex = re.compile(func.search_rules[0])
assert regex.search('abc')
assert regex.search('xyzabc')
assert regex.search('abcd')
assert not regex.search('adbc')
# Default values
assert hasattr(func, 'unblockable')
assert func.unblockable is False
assert hasattr(func, 'priority')
assert func.priority == 'medium'
assert hasattr(func, 'thread')
assert func.thread is True
assert hasattr(func, 'rate')
assert func.rate == 0
assert hasattr(func, 'channel_rate')
assert func.channel_rate == 0
assert hasattr(func, 'global_rate')
assert func.global_rate == 0
# idempotency
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'search_rules')
assert len(func.search_rules) == 1
assert regex not in func.search_rules
assert func.search_rules[0] == r'abc'
assert not hasattr(func, 'rule')
assert func.unblockable is False
assert func.priority == 'medium'
assert func.thread is True
assert func.rate == 0
assert func.channel_rate == 0
assert func.global_rate == 0
def test_clean_callable_nickname_command(tmpconfig, func):
setattr(func, 'nickname_commands', ['hello!'])
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'nickname_commands')
assert len(func.nickname_commands) == 1
assert func.nickname_commands == ['hello!']
assert not hasattr(func, 'rule')
# Default values
assert hasattr(func, 'unblockable')
assert func.unblockable is False
assert hasattr(func, 'priority')
assert func.priority == 'medium'
assert hasattr(func, 'thread')
assert func.thread is True
assert hasattr(func, 'rate')
assert func.rate == 0
assert hasattr(func, 'channel_rate')
assert func.channel_rate == 0
assert hasattr(func, 'global_rate')
assert func.global_rate == 0
# idempotency
loader.clean_callable(func, tmpconfig)
assert not hasattr(func, 'rule')
assert func.unblockable is False
assert func.priority == 'medium'
assert func.thread is True
assert func.rate == 0
assert func.channel_rate == 0
assert func.global_rate == 0
def test_clean_callable_action_command(tmpconfig, func):
setattr(func, 'action_commands', ['bots'])
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'action_commands')
assert len(func.action_commands) == 1
assert func.action_commands == ['bots']
assert not hasattr(func, 'rule')
# idempotency
loader.clean_callable(func, tmpconfig)
assert not hasattr(func, 'rule')
assert func.action_commands == ['bots']
def test_clean_callable_events(tmpconfig, func):
setattr(func, 'event', ['TOPIC'])
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'event')
assert func.event == ['TOPIC']
setattr(func, 'event', ['TOPIC', 'JOIN'])
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'event')
assert func.event == ['TOPIC', 'JOIN']
setattr(func, 'event', ['TOPIC', 'join', 'Nick'])
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'event')
assert func.event == ['TOPIC', 'JOIN', 'NICK']
def test_clean_callable_events_basestring(tmpconfig, func):
setattr(func, 'event', 'topic')
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'event')
assert func.event == ['TOPIC']
setattr(func, 'event', 'JOIN')
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'event')
assert func.event == ['JOIN']
def test_clean_callable_example(tmpconfig, func):
module.commands('test')(func)
module.example('.test hello')(func)
loader.clean_callable(func, tmpconfig)
assert hasattr(func, '_docs')
assert len(func._docs) == 1
assert 'test' in func._docs
docs = func._docs['test']
assert len(docs) == 2
assert docs[0] == inspect.cleandoc(func.__doc__).splitlines()
assert docs[1] == ['.test hello']
def test_clean_callable_example_not_set(tmpconfig, func):
module.commands('test')(func)
loader.clean_callable(func, tmpconfig)
assert hasattr(func, '_docs')
assert len(func._docs) == 1
assert 'test' in func._docs
docs = func._docs['test']
assert len(docs) == 2
assert docs[0] == inspect.cleandoc(func.__doc__).splitlines()
assert docs[1] == []
def test_clean_callable_example_multi_commands(tmpconfig, func):
module.commands('test')(func)
module.commands('unit')(func)
module.example('.test hello')(func)
loader.clean_callable(func, tmpconfig)
assert hasattr(func, '_docs')
assert len(func._docs) == 2
assert 'test' in func._docs
assert 'unit' in func._docs
test_docs = func._docs['test']
unit_docs = func._docs['unit']
assert len(test_docs) == 2
assert test_docs == unit_docs
assert test_docs[0] == inspect.cleandoc(func.__doc__).splitlines()
assert test_docs[1] == ['.test hello']
def test_clean_callable_example_first_only(tmpconfig, func):
module.commands('test')(func)
module.example('.test hello')(func)
module.example('.test bonjour')(func)
loader.clean_callable(func, tmpconfig)
assert len(func._docs) == 1
assert 'test' in func._docs
docs = func._docs['test']
assert len(docs) == 2
assert docs[0] == inspect.cleandoc(func.__doc__).splitlines()
assert docs[1] == ['.test hello']
def test_clean_callable_example_first_only_multi_commands(tmpconfig, func):
module.commands('test')(func)
module.commands('unit')(func)
module.example('.test hello')(func)
module.example('.test bonjour')(func)
loader.clean_callable(func, tmpconfig)
assert hasattr(func, '_docs')
assert len(func._docs) == 2
assert 'test' in func._docs
assert 'unit' in func._docs
test_docs = func._docs['test']
unit_docs = func._docs['unit']
assert len(test_docs) == 2
assert test_docs == unit_docs
assert test_docs[0] == inspect.cleandoc(func.__doc__).splitlines()
assert test_docs[1] == ['.test hello']
def test_clean_callable_example_user_help(tmpconfig, func):
module.commands('test')(func)
module.example('.test hello', user_help=True)(func)
loader.clean_callable(func, tmpconfig)
assert len(func._docs) == 1
assert 'test' in func._docs
docs = func._docs['test']
assert len(docs) == 2
assert docs[0] == inspect.cleandoc(func.__doc__).splitlines()
assert docs[1] == ['.test hello']
def test_clean_callable_example_user_help_multi(tmpconfig, func):
module.commands('test')(func)
module.example('.test hello', user_help=True)(func)
module.example('.test bonjour', user_help=True)(func)
loader.clean_callable(func, tmpconfig)
assert len(func._docs) == 1
assert 'test' in func._docs
docs = func._docs['test']
assert len(docs) == 2
assert docs[0] == inspect.cleandoc(func.__doc__).splitlines()
assert docs[1] == ['.test hello', '.test bonjour']
def test_clean_callable_example_user_help_mixed(tmpconfig, func):
module.commands('test')(func)
module.example('.test hello')(func)
module.example('.test bonjour', user_help=True)(func)
loader.clean_callable(func, tmpconfig)
assert len(func._docs) == 1
assert 'test' in func._docs
docs = func._docs['test']
assert len(docs) == 2
assert docs[0] == inspect.cleandoc(func.__doc__).splitlines()
assert docs[1] == ['.test bonjour']
def test_clean_callable_example_default_prefix(tmpconfig, func):
module.commands('test')(func)
module.example('.test hello')(func)
tmpconfig.core.help_prefix = '!'
loader.clean_callable(func, tmpconfig)
assert len(func._docs) == 1
assert 'test' in func._docs
docs = func._docs['test']
assert len(docs) == 2
assert docs[0] == inspect.cleandoc(func.__doc__).splitlines()
assert docs[1] == ['!test hello']
def test_clean_callable_example_nickname(tmpconfig, func):
module.commands('test')(func)
module.example('$nickname: hello')(func)
loader.clean_callable(func, tmpconfig)
assert len(func._docs) == 1
assert 'test' in func._docs
docs = func._docs['test']
assert len(docs) == 2
assert docs[0] == inspect.cleandoc(func.__doc__).splitlines()
assert docs[1] == ['TestBot: hello']
def test_clean_callable_example_nickname_custom_prefix(tmpconfig, func):
module.commands('test')(func)
module.example('$nickname: hello')(func)
tmpconfig.core.help_prefix = '!'
loader.clean_callable(func, tmpconfig)
assert len(func._docs) == 1
assert 'test' in func._docs
docs = func._docs['test']
assert len(docs) == 2
assert docs[0] == inspect.cleandoc(func.__doc__).splitlines()
assert docs[1] == ['TestBot: hello']
def test_clean_callable_intents(tmpconfig, func):
setattr(func, 'intents', [r'abc'])
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'intents')
assert len(func.intents) == 1
# Test the regex is compiled properly
regex = func.intents[0]
assert regex.match('abc')
assert regex.match('abcd')
assert regex.match('ABC')
assert regex.match('AbCdE')
assert not regex.match('efg')
# Default values
assert hasattr(func, 'unblockable')
assert func.unblockable is False
assert hasattr(func, 'priority')
assert func.priority == 'medium'
assert hasattr(func, 'thread')
assert func.thread is True
assert hasattr(func, 'rate')
assert func.rate == 0
assert hasattr(func, 'channel_rate')
assert func.channel_rate == 0
assert hasattr(func, 'global_rate')
assert func.global_rate == 0
# idempotency
loader.clean_callable(func, tmpconfig)
assert len(func.intents) == 1
assert regex in func.intents
assert func.unblockable is False
assert func.priority == 'medium'
assert func.thread is True
assert func.rate == 0
assert func.channel_rate == 0
assert func.global_rate == 0
def test_clean_callable_url(tmpconfig, func):
setattr(func, 'url_regex', [re.compile('.*')])
loader.clean_callable(func, tmpconfig)
assert hasattr(func, 'url_regex')
assert len(func.url_regex) == 1
# Don't test the regex; that's handled in a different module
# Default values
assert hasattr(func, 'unblockable')
assert func.unblockable is False
assert hasattr(func, 'thread')
assert func.thread is True
assert hasattr(func, 'rate')
assert func.rate == 0
assert hasattr(func, 'channel_rate')
assert func.channel_rate == 0
assert hasattr(func, 'global_rate')
assert func.global_rate == 0
# idempotency
loader.clean_callable(func, tmpconfig)
assert len(func.url_regex) == 1
assert func.unblockable is False
assert func.thread is True
assert func.rate == 0
assert func.channel_rate == 0
assert func.global_rate == 0
| 28.899654 | 82 | 0.699872 |
ace65c28feca588cd7c45f74bab09c757f016563 | 4,586 | py | Python | main.py | yvan674/python-google-cal | 2006dafd048f5c78f8a2c73c37aac7202681f748 | [
"MIT"
] | null | null | null | main.py | yvan674/python-google-cal | 2006dafd048f5c78f8a2c73c37aac7202681f748 | [
"MIT"
] | null | null | null | main.py | yvan674/python-google-cal | 2006dafd048f5c78f8a2c73c37aac7202681f748 | [
"MIT"
] | null | null | null | """Main.
This script accesses google calendar through the calendar API to update or
create events.
"""
import datetime
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
SCOPES = ['https://www.googleapis.com/auth/calendar.events']
CALENDAR_NAME = "Pearl GPU's"
EVENT_NAME = "satyaway pearl5 full"
def main():
"""Shows basic usage of the Google Calendar API.
Prints the start and name of the next 10 events on the user's calendar.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server()
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('calendar', 'v3', credentials=creds)
# Get a list of calendars and chooses the correct one
calendars = get_calendar_list(service)
calendar_id = [calendar['id'] for calendar in calendars
if calendar['summary'] == CALENDAR_NAME][0] # Assumes
# calendars are uniquely named.
# Get a list of events and chooses the correct one
events = get_events_list(service, calendar_id)
event_id = [event['id'] for event in events
if event['summary'] == EVENT_NAME] # Assumes events are
# uniquely named.
# Get end date since this is used in both cases.
cdt = datetime.datetime.now() # cdt = current date time
end_date = cdt + datetime.timedelta(days=4)
end_date = "{:04d}-{:02d}-{:02d}".format(end_date.year, end_date.month,
end_date.day)
if not event_id:
print("Event not found. Creating a new event.")
# Set up start time
cdt = "{:04d}-{:02d}-{:02d}".format(cdt.year, cdt.month, cdt.day)
# Create event
event = {
'summary': EVENT_NAME,
'start': {
'date': cdt
},
'end': {
'date': end_date
},
'reminders': {
'useDefault': False
}
}
event = service.events().insert(calendarId=calendar_id, body=event)\
.execute()
print("Event created: {}".format(event.get('htmlLink')))
else:
event = service.events().get(calendarId=calendar_id,
eventId=event_id[0]).execute()
# updates event to end 3 days after whenever this script was run
event['end'] = {'date': end_date}
updated_event = service.events().update(calendarId=CALENDAR_NAME,
eventId=event_id[0],
body=event).execute()
print("Updated: {}".format(updated_event['updated']))
def get_events_list(service, calendar_id, max_results=50) -> list:
"""Get the next x events from a given calendar."""
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC.
output_list = []
events_result = service.events().list(calendarId=calendar_id, timeMin=now,
maxResults=max_results,
singleEvents=True,
orderBy='startTime').execute()
for event in events_result['items']:
output_list.append(event)
return output_list
def get_calendar_list(service) -> list:
"""Gets a list of calendars in the account."""
page_token = None
output_list = []
while True:
calendar_list = service.calendarList() \
.list(pageToken=page_token).execute()
for calendar_list_entry in calendar_list['items']:
output_list.append(calendar_list_entry)
page_token = calendar_list.get('nextPageToken')
if not page_token:
break
return output_list
if __name__ == '__main__':
main()
| 35.276923 | 79 | 0.59638 |
ace65d4774c696eef8887dd0868022cfb5cda03e | 41,948 | py | Python | tensorflow_examples/lite/model_maker/core/task/model_spec.py | hassanali2596/examples | ce3a6dbcb4b39352a0f00d80e233e30befff1586 | [
"Apache-2.0"
] | null | null | null | tensorflow_examples/lite/model_maker/core/task/model_spec.py | hassanali2596/examples | ce3a6dbcb4b39352a0f00d80e233e30befff1586 | [
"Apache-2.0"
] | null | null | null | tensorflow_examples/lite/model_maker/core/task/model_spec.py | hassanali2596/examples | ce3a6dbcb4b39352a0f00d80e233e30befff1586 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import inspect
import os
import re
import tempfile
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core import compat
from tensorflow_examples.lite.model_maker.core import file_util
from tensorflow_examples.lite.model_maker.core.task import hub_loader
from tensorflow_examples.lite.model_maker.core.task import model_util
import tensorflow_hub as hub
from tensorflow_hub import registry
from official.nlp import optimization
from official.nlp.bert import configs as bert_configs
from official.nlp.bert import run_squad_helper
from official.nlp.bert import squad_evaluate_v1_1
from official.nlp.bert import squad_evaluate_v2_0
from official.nlp.bert import tokenization
from official.nlp.data import classifier_data_lib
from official.nlp.data import squad_lib
from official.nlp.modeling import models
from official.utils.misc import distribution_utils
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
def _get_compat_tf_versions(compat_tf_versions=None):
"""Gets compatible tf versions (default: [2]).
Args:
compat_tf_versions: int, int list or None, indicates compatible versions.
Returns:
A list of compatible tf versions.
"""
if compat_tf_versions is None:
compat_tf_versions = [2]
if not isinstance(compat_tf_versions, list):
compat_tf_versions = [compat_tf_versions]
return compat_tf_versions
def get_num_gpus(num_gpus):
try:
tot_num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))
except (tf.errors.NotFoundError, tf.errors.InternalError):
tot_num_gpus = max(0, num_gpus)
if num_gpus > tot_num_gpus or num_gpus == -1:
num_gpus = tot_num_gpus
return num_gpus
class ImageModelSpec(object):
"""A specification of image model."""
mean_rgb = [0.0]
stddev_rgb = [255.0]
def __init__(self,
uri,
compat_tf_versions=None,
input_image_shape=None,
name=''):
self.uri = uri
self.compat_tf_versions = _get_compat_tf_versions(compat_tf_versions)
self.name = name
if input_image_shape is None:
input_image_shape = [224, 224]
self.input_image_shape = input_image_shape
mobilenet_v2_spec = ImageModelSpec(
uri='https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4',
compat_tf_versions=2,
name='mobilenet_v2')
resnet_50_spec = ImageModelSpec(
uri='https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/4',
compat_tf_versions=2,
name='resnet_50')
efficientnet_lite0_spec = ImageModelSpec(
uri='https://tfhub.dev/tensorflow/efficientnet/lite0/feature-vector/2',
compat_tf_versions=[1, 2],
name='efficientnet_lite0')
efficientnet_lite1_spec = ImageModelSpec(
uri='https://tfhub.dev/tensorflow/efficientnet/lite1/feature-vector/2',
compat_tf_versions=[1, 2],
input_image_shape=[240, 240],
name='efficientnet_lite1')
efficientnet_lite2_spec = ImageModelSpec(
uri='https://tfhub.dev/tensorflow/efficientnet/lite2/feature-vector/2',
compat_tf_versions=[1, 2],
input_image_shape=[260, 260],
name='efficientnet_lite2')
efficientnet_lite3_spec = ImageModelSpec(
uri='https://tfhub.dev/tensorflow/efficientnet/lite3/feature-vector/2',
compat_tf_versions=[1, 2],
input_image_shape=[280, 280],
name='efficientnet_lite3')
efficientnet_lite4_spec = ImageModelSpec(
uri='https://tfhub.dev/tensorflow/efficientnet/lite4/feature-vector/2',
compat_tf_versions=[1, 2],
input_image_shape=[300, 300],
name='efficientnet_lite4')
class AverageWordVecModelSpec(object):
"""A specification of averaging word vector model."""
PAD = '<PAD>' # Index: 0
START = '<START>' # Index: 1
UNKNOWN = '<UNKNOWN>' # Index: 2
compat_tf_versions = _get_compat_tf_versions(2)
need_gen_vocab = True
default_training_epochs = 2
default_batch_size = 32
convert_from_saved_model_tf2 = False
def __init__(self,
num_words=10000,
seq_len=256,
wordvec_dim=16,
lowercase=True,
dropout_rate=0.2):
"""Initialze a instance with preprocessing and model paramaters.
Args:
num_words: Number of words to generate the vocabulary from data.
seq_len: Length of the sequence to feed into the model.
wordvec_dim: Dimension of the word embedding.
lowercase: Whether to convert all uppercase character to lowercase during
preprocessing.
dropout_rate: The rate for dropout.
"""
self.num_words = num_words
self.seq_len = seq_len
self.wordvec_dim = wordvec_dim
self.lowercase = lowercase
self.dropout_rate = dropout_rate
def get_name_to_features(self):
"""Gets the dictionary describing the features."""
name_to_features = {
'input_ids': tf.io.FixedLenFeature([self.seq_len], tf.int64),
'label_ids': tf.io.FixedLenFeature([], tf.int64),
}
return name_to_features
def select_data_from_record(self, record):
"""Dispatches records to features and labels."""
x = record['input_ids']
y = record['label_ids']
return (x, y)
def convert_examples_to_features(self, examples, tfrecord_file, label_names):
"""Converts examples to features and write them into TFRecord file."""
writer = tf.io.TFRecordWriter(tfrecord_file)
label_to_id = dict((name, i) for i, name in enumerate(label_names))
for example in examples:
features = collections.OrderedDict()
input_ids = self.preprocess(example.text_a)
label_id = label_to_id[example.label]
features['input_ids'] = create_int_feature(input_ids)
features['label_ids'] = create_int_feature([label_id])
tf_example = tf.train.Example(
features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def create_model(self, num_classes, optimizer='rmsprop'):
"""Creates the keras model."""
# Gets a classifier model.
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=[self.seq_len], dtype=tf.int32),
tf.keras.layers.Embedding(
len(self.vocab), self.wordvec_dim, input_length=self.seq_len),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(self.wordvec_dim, activation=tf.nn.relu),
tf.keras.layers.Dropout(self.dropout_rate),
tf.keras.layers.Dense(num_classes, activation='softmax')
])
model.compile(
optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
def run_classifier(self, train_input_fn, validation_input_fn, epochs,
steps_per_epoch, validation_steps, num_classes):
"""Creates classifier and runs the classifier training."""
if epochs is None:
epochs = self.default_training_epochs
model = self.create_model(num_classes)
# Gets training and validation dataset
train_ds = train_input_fn()
validation_ds = None
if validation_input_fn is not None:
validation_ds = validation_input_fn()
# Trains the models.
model.fit(
train_ds,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
validation_data=validation_ds,
validation_steps=validation_steps)
return model
def gen_vocab(self, examples):
"""Generates vocabulary list in `examples` with maximum `num_words` words."""
vocab_counter = collections.Counter()
for example in examples:
tokens = self._tokenize(example.text_a)
for token in tokens:
vocab_counter[token] += 1
vocab_freq = vocab_counter.most_common(self.num_words)
vocab_list = [self.PAD, self.START, self.UNKNOWN
] + [word for word, _ in vocab_freq]
self.vocab = collections.OrderedDict(
((v, i) for i, v in enumerate(vocab_list)))
return self.vocab
def preprocess(self, raw_text):
"""Preprocess the text for text classification."""
tokens = self._tokenize(raw_text)
# Gets ids for START, PAD and UNKNOWN tokens.
start_id = self.vocab[self.START]
pad_id = self.vocab[self.PAD]
unknown_id = self.vocab[self.UNKNOWN]
token_ids = [self.vocab.get(token, unknown_id) for token in tokens]
token_ids = [start_id] + token_ids
if len(token_ids) < self.seq_len:
# Padding.
pad_length = self.seq_len - len(token_ids)
token_ids = token_ids + pad_length * [pad_id]
else:
token_ids = token_ids[:self.seq_len]
return token_ids
def _tokenize(self, text):
r"""Splits by '\W' except '\''."""
text = tf.compat.as_text(text)
if self.lowercase:
text = text.lower()
tokens = re.compile(r'[^\w\']+').split(text.strip())
return list(filter(None, tokens))
def save_vocab(self, vocab_filename):
"""Saves the vocabulary in `vocab_filename`."""
with tf.io.gfile.GFile(vocab_filename, 'w') as f:
for token, index in self.vocab.items():
f.write('%s %d\n' % (token, index))
tf.compat.v1.logging.info('Saved vocabulary in %s.', vocab_filename)
def load_vocab(self, vocab_filename):
"""Loads vocabulary from `vocab_filename`."""
with tf.io.gfile.GFile(vocab_filename, 'r') as f:
vocab_list = []
for line in f:
word, index = line.strip().split()
vocab_list.append((word, int(index)))
self.vocab = collections.OrderedDict(vocab_list)
return self.vocab
def get_config(self):
"""Gets the configuration."""
return {
'num_words': self.num_words,
'seq_len': self.seq_len,
'wordvec_dim': self.wordvec_dim,
'lowercase': self.lowercase
}
def create_classifier_model(bert_config,
num_labels,
max_seq_length,
initializer=None,
hub_module_url=None,
hub_module_trainable=True,
is_tf2=True):
"""BERT classifier model in functional API style.
Construct a Keras model for predicting `num_labels` outputs from an input with
maximum sequence length `max_seq_length`.
Args:
bert_config: BertConfig, the config defines the core Bert model.
num_labels: integer, the number of classes.
max_seq_length: integer, the maximum input sequence length.
initializer: Initializer for the final dense layer in the span labeler.
Defaulted to TruncatedNormal initializer.
hub_module_url: TF-Hub path/url to Bert module.
hub_module_trainable: True to finetune layers in the hub module.
is_tf2: boolean, whether the hub module is in TensorFlow 2.x format.
Returns:
Combined prediction model (words, mask, type) -> (one-hot labels)
BERT sub-model (words, mask, type) -> (bert_outputs)
"""
if initializer is None:
initializer = tf.keras.initializers.TruncatedNormal(
stddev=bert_config.initializer_range)
input_word_ids = tf.keras.layers.Input(
shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids')
input_mask = tf.keras.layers.Input(
shape=(max_seq_length,), dtype=tf.int32, name='input_mask')
input_type_ids = tf.keras.layers.Input(
shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids')
if is_tf2:
bert_model = hub.KerasLayer(hub_module_url, trainable=hub_module_trainable)
pooled_output, _ = bert_model([input_word_ids, input_mask, input_type_ids])
else:
bert_model = hub_loader.HubKerasLayerV1V2(
hub_module_url,
signature='tokens',
output_key='pooled_output',
trainable=hub_module_trainable)
pooled_output = bert_model({
'input_ids': input_word_ids,
'input_mask': input_mask,
'segment_ids': input_type_ids
})
output = tf.keras.layers.Dropout(rate=bert_config.hidden_dropout_prob)(
pooled_output)
output = tf.keras.layers.Dense(
num_labels,
kernel_initializer=initializer,
name='output',
activation='softmax',
dtype=tf.float32)(
output)
return tf.keras.Model(
inputs=[input_word_ids, input_mask, input_type_ids],
outputs=output), bert_model
class BertModelSpec(object):
"""A specification of BERT model."""
compat_tf_versions = _get_compat_tf_versions(2)
need_gen_vocab = False
default_batch_size = 32
def __init__(
self,
uri='https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1',
model_dir=None,
seq_len=128,
dropout_rate=0.1,
initializer_range=0.02,
learning_rate=3e-5,
distribution_strategy='mirrored',
num_gpus=-1,
tpu='',
trainable=True,
do_lower_case=True,
is_tf2=True,
convert_from_saved_model_tf2=False):
"""Initialze an instance with model paramaters.
Args:
uri: TF-Hub path/url to Bert module.
model_dir: The location of the model checkpoint files.
seq_len: Length of the sequence to feed into the model.
dropout_rate: The rate for dropout.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
learning_rate: The initial learning rate for Adam.
distribution_strategy: A string specifying which distribution strategy to
use. Accepted values are 'off', 'one_device', 'mirrored',
'parameter_server', 'multi_worker_mirrored', and 'tpu' -- case
insensitive. 'off' means not to use Distribution Strategy; 'tpu' means
to use TPUStrategy using `tpu_address`.
num_gpus: How many GPUs to use at each worker with the
DistributionStrategies API. The default is -1, which means utilize all
available GPUs.
tpu: TPU address to connect to.
trainable: boolean, whether pretrain layer is trainable.
do_lower_case: boolean, whether to lower case the input text. Should be
True for uncased models and False for cased models.
is_tf2: boolean, whether the hub module is in TensorFlow 2.x format.
convert_from_saved_model_tf2: Convert to TFLite from saved_model in TF
2.x.
"""
if compat.get_tf_behavior() not in self.compat_tf_versions:
raise ValueError('Incompatible versions. Expect {}, but got {}.'.format(
self.compat_tf_versions, compat.get_tf_behavior()))
self.seq_len = seq_len
self.dropout_rate = dropout_rate
self.initializer_range = initializer_range
self.learning_rate = learning_rate
self.trainable = trainable
self.model_dir = model_dir
if self.model_dir is None:
self.model_dir = tempfile.mkdtemp()
num_gpus = get_num_gpus(num_gpus)
self.strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=distribution_strategy,
num_gpus=num_gpus,
tpu_address=tpu)
self.tpu = tpu
self.uri = uri
self.do_lower_case = do_lower_case
self.is_tf2 = is_tf2
self.bert_config = bert_configs.BertConfig(
0,
initializer_range=self.initializer_range,
hidden_dropout_prob=self.dropout_rate)
self.convert_from_saved_model_tf2 = convert_from_saved_model_tf2
self.is_built = False
def reorder_input_details(self, tflite_input_details):
"""Reorders the tflite input details to map the order of keras model."""
for detail in tflite_input_details:
name = detail['name']
if 'input_word_ids' in name:
input_word_ids_detail = detail
elif 'input_mask' in name:
input_mask_detail = detail
elif 'input_type_ids' in name:
input_type_ids_detail = detail
return [input_word_ids_detail, input_mask_detail, input_type_ids_detail]
def build(self):
"""Builds the class. Used for lazy initialization."""
if self.is_built:
return
self.vocab_file = os.path.join(
registry.resolver(self.uri), 'assets', 'vocab.txt')
self.tokenizer = tokenization.FullTokenizer(self.vocab_file,
self.do_lower_case)
def save_vocab(self, vocab_filename):
"""Prints the file path to the vocabulary."""
if not self.is_built:
self.build()
tf.io.gfile.copy(self.vocab_file, vocab_filename, overwrite=True)
tf.compat.v1.logging.info('Saved vocabulary in %s.', vocab_filename)
class BertClassifierModelSpec(BertModelSpec):
  """A specification of BERT model for text classification."""

  def get_name_to_features(self):
    """Gets the dictionary describing the features.

    Returns:
      Feature spec used to parse serialized tf.Examples written by
      `convert_examples_to_features`.
    """
    name_to_features = {
        'input_ids': tf.io.FixedLenFeature([self.seq_len], tf.int64),
        'input_mask': tf.io.FixedLenFeature([self.seq_len], tf.int64),
        'segment_ids': tf.io.FixedLenFeature([self.seq_len], tf.int64),
        'label_ids': tf.io.FixedLenFeature([], tf.int64),
        'is_real_example': tf.io.FixedLenFeature([], tf.int64),
    }
    return name_to_features

  def select_data_from_record(self, record):
    """Dispatches records to features and labels.

    Renames the TFRecord feature keys to the input names the Keras model
    expects and splits off `label_ids` as the label.
    """
    x = {
        'input_word_ids': record['input_ids'],
        'input_mask': record['input_mask'],
        'input_type_ids': record['segment_ids']
    }
    y = record['label_ids']
    return (x, y)

  def convert_examples_to_features(self, examples, tfrecord_file, label_names):
    """Converts examples to features and writes them into a TFRecord file."""
    # Ensure the tokenizer exists before featurizing.
    if not self.is_built:
      self.build()
    classifier_data_lib.file_based_convert_examples_to_features(
        examples, label_names, self.seq_len, self.tokenizer, tfrecord_file)

  def create_model(self, num_classes, optimizer='adam'):
    """Creates and compiles the Keras classifier model."""
    bert_model, _ = create_classifier_model(
        self.bert_config,
        num_classes,
        self.seq_len,
        hub_module_url=self.uri,
        hub_module_trainable=self.trainable,
        is_tf2=self.is_tf2)

    # Defines evaluation metrics function, which will create metrics in the
    # correct device and strategy scope.
    def metric_fn():
      return tf.keras.metrics.SparseCategoricalAccuracy(
          'test_accuracy', dtype=tf.float32)

    bert_model.compile(
        optimizer=optimizer,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[metric_fn()])
    return bert_model

  def run_classifier(self, train_input_fn, validation_input_fn, epochs,
                     steps_per_epoch, validation_steps, num_classes):
    """Creates classifier and runs the classifier training.

    Returns the trained Keras model.
    """
    # Learning-rate warmup over the first 10% of the total training steps.
    warmup_steps = int(epochs * steps_per_epoch * 0.1)
    initial_lr = self.learning_rate

    # Datasets, optimizer and model are all created inside the distribution
    # strategy scope configured on this spec.
    with distribution_utils.get_strategy_scope(self.strategy):
      training_dataset = train_input_fn()
      evaluation_dataset = None
      if validation_input_fn is not None:
        evaluation_dataset = validation_input_fn()
      optimizer = optimization.create_optimizer(initial_lr,
                                                steps_per_epoch * epochs,
                                                warmup_steps)
      bert_model = self.create_model(num_classes, optimizer)

    # TensorBoard summaries and weight checkpoints land under model_dir.
    summary_dir = os.path.join(self.model_dir, 'summaries')
    summary_callback = tf.keras.callbacks.TensorBoard(summary_dir)
    checkpoint_path = os.path.join(self.model_dir, 'checkpoint')
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        checkpoint_path, save_weights_only=True)

    bert_model.fit(
        x=training_dataset,
        validation_data=evaluation_dataset,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        validation_steps=validation_steps,
        callbacks=[summary_callback, checkpoint_callback])

    return bert_model

  def get_config(self):
    """Gets the configuration."""
    # Only preprocessing related variables are included.
    return {'uri': self.uri, 'seq_len': self.seq_len}
def dump_to_files(all_predictions, all_nbest_json, scores_diff_json,
                  version_2_with_negative, output_dir):
  """Writes question-answering outputs as JSON files under `output_dir`."""
  prediction_path = os.path.join(output_dir, 'predictions.json')
  nbest_path = os.path.join(output_dir, 'nbest_predictions.json')
  null_odds_path = os.path.join(output_dir, 'null_odds.json')
  tf.compat.v1.logging.info('Writing predictions to: %s',
                            (prediction_path))
  tf.compat.v1.logging.info('Writing nbest to: %s', (nbest_path))

  squad_lib.write_to_json_files(all_predictions, prediction_path)
  squad_lib.write_to_json_files(all_nbest_json, nbest_path)
  # Null-odds scores only exist for SQuAD 2.0 style data with negatives.
  if version_2_with_negative:
    squad_lib.write_to_json_files(scores_diff_json, null_odds_path)
def create_qa_model(bert_config,
                    max_seq_length,
                    initializer=None,
                    hub_module_url=None,
                    hub_module_trainable=True,
                    is_tf2=True):
  """Returns BERT qa model along with core BERT model to import weights.

  Args:
    bert_config: BertConfig, the config defines the core Bert model.
    max_seq_length: integer, the maximum input sequence length.
    initializer: Initializer for the final dense layer in the span labeler.
      Defaulted to TruncatedNormal initializer.
    hub_module_url: TF-Hub path/url to Bert module.
    hub_module_trainable: True to finetune layers in the hub module.
    is_tf2: boolean, whether the hub module is in TensorFlow 2.x format.

  Returns:
    A tuple of (1) keras model that outputs start logits and end logits and
    (2) the core BERT transformer encoder.
  """
  if initializer is None:
    initializer = tf.keras.initializers.TruncatedNormal(
        stddev=bert_config.initializer_range)

  # Symbolic inputs shared by both hub-module formats.
  input_word_ids = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids')
  input_mask = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_mask')
  input_type_ids = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids')

  if is_tf2:
    # TF2 hub module: callable layer returning (pooled, sequence) outputs.
    core_model = hub.KerasLayer(hub_module_url, trainable=hub_module_trainable)
    pooled_output, sequence_output = core_model(
        [input_word_ids, input_mask, input_type_ids])
  else:
    # TF1 hub module loaded through the V1/V2 compatibility wrapper, called
    # with the 'tokens' signature and outputs returned as a dict.
    bert_model = hub_loader.HubKerasLayerV1V2(
        hub_module_url,
        signature='tokens',
        signature_outputs_as_dict=True,
        trainable=hub_module_trainable)
    outputs = bert_model({
        'input_ids': input_word_ids,
        'input_mask': input_mask,
        'segment_ids': input_type_ids
    })
    pooled_output = outputs['pooled_output']
    sequence_output = outputs['sequence_output']

  # Wrap the encoder so BertSpanLabeler can attach the span head; note the
  # output order here is [sequence_output, pooled_output].
  bert_encoder = tf.keras.Model(
      inputs=[input_word_ids, input_mask, input_type_ids],
      outputs=[sequence_output, pooled_output],
      name='core_model')
  return models.BertSpanLabeler(
      network=bert_encoder, initializer=initializer), bert_encoder
def create_qa_model_from_squad(max_seq_length,
                               hub_module_url,
                               hub_module_trainable=True,
                               is_tf2=False):
  """Creates a QA model initialized from a BERT model retrained on SQuAD.

  Args:
    max_seq_length: integer, the maximum input sequence length.
    hub_module_url: TF-Hub path/url to a Bert module retrained on the SQuAD
      dataset.
    hub_module_trainable: True to finetune layers in the hub module.
    is_tf2: boolean, whether the hub module is in TensorFlow 2.x format. Only
      TF1-format modules are supported here.

  Returns:
    Keras model that outputs start logits and end logits.
  """
  if is_tf2:
    raise ValueError('Only supports to load TensorFlow 1.x hub module.')

  # Build the three symbolic inputs in the order the model expects.
  inputs = {}
  for input_name in ('input_word_ids', 'input_mask', 'input_type_ids'):
    inputs[input_name] = tf.keras.layers.Input(
        shape=(max_seq_length,), dtype=tf.int32, name=input_name)

  squad_layer = hub_loader.HubKerasLayerV1V2(
      hub_module_url,
      signature='squad',
      signature_outputs_as_dict=True,
      trainable=hub_module_trainable)
  outputs = squad_layer({
      'input_ids': inputs['input_word_ids'],
      'input_mask': inputs['input_mask'],
      'segment_ids': inputs['input_type_ids']
  })

  # Identity lambdas rename the hub outputs for the Keras model.
  start_logits = tf.keras.layers.Lambda(
      tf.identity, name='start_positions')(
          outputs['start_logits'])
  end_logits = tf.keras.layers.Lambda(
      tf.identity, name='end_positions')(
          outputs['end_logits'])

  return tf.keras.Model(
      inputs=[inputs['input_word_ids'], inputs['input_mask'],
              inputs['input_type_ids']],
      outputs=[start_logits, end_logits])
class BertQAModelSpec(BertModelSpec):
  """A specification of BERT model for question answering."""

  def __init__(
      self,
      uri='https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1',
      model_dir=None,
      seq_len=384,
      query_len=64,
      doc_stride=128,
      dropout_rate=0.1,
      initializer_range=0.02,
      learning_rate=8e-5,
      distribution_strategy='mirrored',
      num_gpus=-1,
      tpu='',
      trainable=True,
      predict_batch_size=8,
      do_lower_case=True,
      is_tf2=True,
      convert_from_saved_model_tf2=False,
      tflite_output_name=None,
      init_from_squad_model=False):
    """Initialize an instance with model parameters.

    Args:
      uri: TF-Hub path/url to Bert module.
      model_dir: The location of the model checkpoint files.
      seq_len: Length of the sequence to feed into the model.
      query_len: Length of the query to feed into the model.
      doc_stride: The stride when we do a sliding window approach to take
        chunks of the documents.
      dropout_rate: The rate for dropout.
      initializer_range: The stdev of the truncated_normal_initializer for
        initializing all weight matrices.
      learning_rate: The initial learning rate for Adam.
      distribution_strategy: A string specifying which distribution strategy to
        use. Accepted values are 'off', 'one_device', 'mirrored',
        'parameter_server', 'multi_worker_mirrored', and 'tpu' -- case
        insensitive. 'off' means not to use Distribution Strategy; 'tpu' means
        to use TPUStrategy using `tpu_address`.
      num_gpus: How many GPUs to use at each worker with the
        DistributionStrategies API. The default is -1, which means utilize all
        available GPUs.
      tpu: TPU address to connect to.
      trainable: boolean, whether pretrain layer is trainable.
      predict_batch_size: Batch size for prediction.
      do_lower_case: boolean, whether to lower case the input text. Should be
        True for uncased models and False for cased models.
      is_tf2: boolean, whether the hub module is in TensorFlow 2.x format.
      convert_from_saved_model_tf2: Convert to TFLite from saved_model in TF
        2.x.
      tflite_output_name: Dict, output names for the TFLite model.
      init_from_squad_model: boolean, whether to initialize from the model that
        is already retrained on Squad 1.1.
    """
    super(BertQAModelSpec,
          self).__init__(uri, model_dir, seq_len, dropout_rate,
                         initializer_range, learning_rate,
                         distribution_strategy, num_gpus, tpu, trainable,
                         do_lower_case, is_tf2, convert_from_saved_model_tf2)
    self.query_len = query_len
    self.doc_stride = doc_stride
    self.predict_batch_size = predict_batch_size
    if tflite_output_name is None:
      # Default tensor names of the converted TFLite model's outputs.
      tflite_output_name = {
          'start_logits': 'Identity_1',
          'end_logits': 'Identity'
      }
    self.tflite_output_name = tflite_output_name
    self.init_from_squad_model = init_from_squad_model

  def get_name_to_features(self, is_training):
    """Gets the dictionary describing the features.

    Training records carry answer span labels; eval records instead carry a
    `unique_ids` field used to match predictions back to examples.
    """
    name_to_features = {
        'input_ids': tf.io.FixedLenFeature([self.seq_len], tf.int64),
        'input_mask': tf.io.FixedLenFeature([self.seq_len], tf.int64),
        'segment_ids': tf.io.FixedLenFeature([self.seq_len], tf.int64),
    }

    if is_training:
      name_to_features['start_positions'] = tf.io.FixedLenFeature([], tf.int64)
      name_to_features['end_positions'] = tf.io.FixedLenFeature([], tf.int64)
    else:
      name_to_features['unique_ids'] = tf.io.FixedLenFeature([], tf.int64)

    return name_to_features

  def select_data_from_record(self, record):
    """Dispatches records to features and labels.

    Span positions become labels; `input_ids`/`segment_ids` are renamed to the
    model's input names; everything else passes through as a feature.
    """
    x, y = {}, {}
    for name, tensor in record.items():
      if name in ('start_positions', 'end_positions'):
        y[name] = tensor
      elif name == 'input_ids':
        x['input_word_ids'] = tensor
      elif name == 'segment_ids':
        x['input_type_ids'] = tensor
      else:
        x[name] = tensor
    return (x, y)

  def get_config(self):
    """Gets the configuration."""
    # Only preprocessing related variables are included.
    return {
        'uri': self.uri,
        'seq_len': self.seq_len,
        'query_len': self.query_len,
        'doc_stride': self.doc_stride
    }

  def convert_examples_to_features(self, examples, is_training, output_fn,
                                   batch_size):
    """Converts examples to features and writes them into a TFRecord file."""
    # Ensure the tokenizer exists before featurizing.
    if not self.is_built:
      self.build()

    return squad_lib.convert_examples_to_features(
        examples=examples,
        tokenizer=self.tokenizer,
        max_seq_length=self.seq_len,
        doc_stride=self.doc_stride,
        max_query_length=self.query_len,
        is_training=is_training,
        output_fn=output_fn,
        batch_size=batch_size)

  def create_model(self):
    """Creates the model for qa task.

    Either loads a model already fine-tuned on SQuAD (init_from_squad_model)
    or attaches a fresh span-labeling head to the base BERT encoder.
    """
    if self.init_from_squad_model:
      return create_qa_model_from_squad(self.seq_len, self.uri, self.trainable,
                                        self.is_tf2)
    else:
      qa_model, _ = create_qa_model(
          self.bert_config,
          self.seq_len,
          hub_module_url=self.uri,
          hub_module_trainable=self.trainable,
          is_tf2=self.is_tf2)
      return qa_model

  def train(self, train_input_fn, epochs, steps_per_epoch):
    """Run bert QA training and return the trained Keras model."""
    # Learning-rate warmup over the first 10% of the total training steps.
    warmup_steps = int(epochs * steps_per_epoch * 0.1)

    def _loss_fn(positions, logits):
      """Loss function for QA model (applied to each logits head)."""
      loss = tf.keras.losses.sparse_categorical_crossentropy(
          positions, logits, from_logits=True)
      return tf.reduce_mean(loss)

    # Dataset, model and optimizer are created inside the distribution
    # strategy scope configured on this spec.
    with distribution_utils.get_strategy_scope(self.strategy):
      training_dataset = train_input_fn()
      bert_model = self.create_model()
      optimizer = optimization.create_optimizer(self.learning_rate,
                                                steps_per_epoch * epochs,
                                                warmup_steps)

      # Start and end logits are weighted equally in the total loss.
      bert_model.compile(
          optimizer=optimizer, loss=_loss_fn, loss_weights=[0.5, 0.5])

    summary_dir = os.path.join(self.model_dir, 'summaries')
    summary_callback = tf.keras.callbacks.TensorBoard(summary_dir)
    checkpoint_path = os.path.join(self.model_dir, 'checkpoint')
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        checkpoint_path, save_weights_only=True)

    # A fully-frozen model has nothing to fit; warn and return it unchanged.
    if not bert_model.trainable_variables:
      tf.compat.v1.logging.warning(
          'Trainable variables in the model are empty.')
      return bert_model

    bert_model.fit(
        x=training_dataset,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        callbacks=[summary_callback, checkpoint_callback])

    return bert_model

  def _predict_without_distribute_strategy(self, model, input_fn):
    """Predicts the dataset without using distribute strategy."""
    ds = input_fn()
    all_results = []
    for features, _ in ds:
      outputs = model.predict_on_batch(features)
      # Pair each example's id with its start/end logits for postprocessing.
      for unique_id, start_logits, end_logits in zip(features['unique_ids'],
                                                     outputs[0], outputs[1]):
        raw_result = run_squad_helper.RawResult(
            unique_id=unique_id.numpy(),
            start_logits=start_logits.tolist(),
            end_logits=end_logits.tolist())
        all_results.append(raw_result)
        if len(all_results) % 100 == 0:
          tf.compat.v1.logging.info('Made predictions for %d records.',
                                    len(all_results))
    return all_results

  def _predict_with_distribute_strategy(self, model, input_fn, num_steps):
    """Predicts the dataset using distribute strategy."""
    predict_iterator = iter(
        self.strategy.experimental_distribute_datasets_from_function(input_fn))

    @tf.function
    def predict_step(iterator):
      """Predicts on distributed devices."""

      def _replicated_step(inputs):
        """Replicated prediction calculation."""
        x, _ = inputs
        # `unique_ids` is metadata, not a model input; pop it before the call.
        unique_ids = x.pop('unique_ids')
        start_logits, end_logits = model(x, training=False)
        return dict(
            unique_ids=unique_ids,
            start_logits=start_logits,
            end_logits=end_logits)

      outputs = self.strategy.run(_replicated_step, args=(next(iterator),))
      # Gather per-replica results back to the host.
      return tf.nest.map_structure(self.strategy.experimental_local_results,
                                   outputs)

    all_results = []
    for _ in range(num_steps):
      predictions = predict_step(predict_iterator)
      for result in run_squad_helper.get_raw_results(predictions):
        all_results.append(result)
      if len(all_results) % 100 == 0:
        tf.compat.v1.logging.info('Made predictions for %d records.',
                                  len(all_results))
    return all_results

  def predict(self, model, input_fn, num_steps):
    """Predicts the dataset from `input_fn` for `model`."""
    if self.strategy:
      return self._predict_with_distribute_strategy(model, input_fn, num_steps)
    else:
      return self._predict_without_distribute_strategy(model, input_fn)

  def reorder_output_details(self, tflite_output_details):
    """Reorders the tflite output details to map the order of keras model."""
    # NOTE(review): assumes both configured output names appear in
    # `tflite_output_details`; otherwise the locals below are unbound and an
    # UnboundLocalError is raised -- confirm against the converter's naming.
    for detail in tflite_output_details:
      name = detail['name']
      if self.tflite_output_name['start_logits'] == name:
        start_logits_detail = detail
      if self.tflite_output_name['end_logits'] == name:
        end_logits_detail = detail
    return (start_logits_detail, end_logits_detail)

  def predict_tflite(self, tflite_filepath, input_fn):
    """Predicts the `input_fn` dataset for TFLite model in `tflite_filepath`."""
    ds = input_fn()
    all_results = []

    # The runner reorders TFLite tensors to match the Keras input/output order.
    lite_runner = model_util.LiteRunner(tflite_filepath,
                                        self.reorder_input_details,
                                        self.reorder_output_details)
    for features, _ in ds:
      outputs = lite_runner.run(features)
      for unique_id, start_logits, end_logits in zip(features['unique_ids'],
                                                     outputs[0], outputs[1]):
        raw_result = run_squad_helper.RawResult(
            unique_id=unique_id.numpy(),
            start_logits=start_logits.tolist(),
            end_logits=end_logits.tolist())
        all_results.append(raw_result)
        if len(all_results) % 100 == 0:
          tf.compat.v1.logging.info('Made predictions for %d records.',
                                    len(all_results))
    return all_results

  def evaluate(self, model, tflite_filepath, input_fn, num_steps, eval_examples,
               eval_features, predict_file, version_2_with_negative,
               max_answer_length, null_score_diff_threshold, verbose_logging,
               output_dir):
    """Evaluate QA model.

    Exactly one of `model` (Keras) or `tflite_filepath` (TFLite) must be set;
    predictions from the chosen backend are postprocessed into answer strings
    and scored against the SQuAD reference file.

    Args:
      model: The keras model to be evaluated.
      tflite_filepath: File path to the TFLite model.
      input_fn: Function that returns a tf.data.Dataset used for evaluation.
      num_steps: Number of steps to evaluate the model.
      eval_examples: List of `squad_lib.SquadExample` for evaluation data.
      eval_features: List of `squad_lib.InputFeatures` for evaluation data.
      predict_file: The input predict file.
      version_2_with_negative: Whether the input predict file is SQuAD 2.0
        format.
      max_answer_length: The maximum length of an answer that can be generated.
        This is needed because the start and end predictions are not
        conditioned on one another.
      null_score_diff_threshold: If null_score - best_non_null is greater than
        the threshold, predict null. This is only used for SQuAD v2.
      verbose_logging: If true, all of the warnings related to data processing
        will be printed. A number of warnings are expected for a normal SQuAD
        evaluation.
      output_dir: The output directory to save output to json files:
        predictions.json, nbest_predictions.json, null_odds.json. If None,
        skip saving to json files.

    Returns:
      A dict contains two metrics: Exact match rate and F1 score.
    """
    if model is not None and tflite_filepath is not None:
      raise ValueError('Exactly one of the paramaters `model` and '
                       '`tflite_filepath` should be set.')
    elif model is None and tflite_filepath is None:
      raise ValueError('At least one of the parameters `model` and '
                       '`tflite_filepath` are None.')

    if tflite_filepath is not None:
      all_results = self.predict_tflite(tflite_filepath, input_fn)
    else:
      all_results = self.predict(model, input_fn, num_steps)

    # Convert raw logits into final/nbest answer text per example.
    all_predictions, all_nbest_json, scores_diff_json = (
        squad_lib.postprocess_output(
            eval_examples,
            eval_features,
            all_results,
            n_best_size=20,
            max_answer_length=max_answer_length,
            do_lower_case=self.do_lower_case,
            version_2_with_negative=version_2_with_negative,
            null_score_diff_threshold=null_score_diff_threshold,
            verbose=verbose_logging))

    if output_dir is not None:
      dump_to_files(all_predictions, all_nbest_json, scores_diff_json,
                    version_2_with_negative, output_dir)

    # Score against the ground-truth SQuAD file with the matching evaluator.
    dataset_json = file_util.load_json_file(predict_file)
    pred_dataset = dataset_json['data']

    if version_2_with_negative:
      eval_metrics = squad_evaluate_v2_0.evaluate(pred_dataset, all_predictions,
                                                  scores_diff_json)
    else:
      eval_metrics = squad_evaluate_v1_1.evaluate(pred_dataset, all_predictions)
    return eval_metrics
# Pre-configured MobileBERT spec for text classification. The hub module is in
# TF1 format (is_tf2=False) and TFLite conversion goes through a TF 2.x
# SavedModel (convert_from_saved_model_tf2=True).
mobilebert_classifier_spec = BertClassifierModelSpec(
    uri='https://tfhub.dev/google/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT/1',
    is_tf2=False,
    distribution_strategy='off',
    convert_from_saved_model_tf2=True)
mobilebert_classifier_spec.default_batch_size = 48

# MobileBERT spec for question answering, trained from the base checkpoint;
# tflite_output_name maps the converted model's tensor names to logits roles.
mobilebert_qa_spec = BertQAModelSpec(
    uri='https://tfhub.dev/google/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT/1',
    is_tf2=False,
    distribution_strategy='off',
    convert_from_saved_model_tf2=True,
    learning_rate=5e-05,
    tflite_output_name={
        'start_logits': 'StatefulPartitionedCall:1',
        'end_logits': 'StatefulPartitionedCall:0'
    })
mobilebert_qa_spec.default_batch_size = 48

# MobileBERT QA spec whose weights start from a checkpoint already fine-tuned
# on SQuAD 1.1 (init_from_squad_model=True).
mobilebert_qa_squad_spec = BertQAModelSpec(
    uri='https://tfhub.dev/google/mobilebert/uncased_L-24_H-128_B-512_A-4_F-4_OPT/squadv1/1',
    is_tf2=False,
    distribution_strategy='off',
    convert_from_saved_model_tf2=True,
    learning_rate=5e-05,
    tflite_output_name={
        'start_logits': 'StatefulPartitionedCall:1',
        'end_logits': 'StatefulPartitionedCall:0'
    },
    init_from_squad_model=True)
mobilebert_qa_squad_spec.default_batch_size = 48
# A dict for model specs to make it accessible by string key.
# Values are either spec classes (instantiated lazily by `get`) or
# pre-configured spec instances.
MODEL_SPECS = {
    'efficientnet_lite0': efficientnet_lite0_spec,
    'efficientnet_lite1': efficientnet_lite1_spec,
    'efficientnet_lite2': efficientnet_lite2_spec,
    'efficientnet_lite3': efficientnet_lite3_spec,
    'efficientnet_lite4': efficientnet_lite4_spec,
    'mobilenet_v2': mobilenet_v2_spec,
    'resnet_50': resnet_50_spec,
    'average_word_vec': AverageWordVecModelSpec,
    'bert': BertModelSpec,
    'bert_classifier': BertClassifierModelSpec,
    'bert_qa': BertQAModelSpec,
    'mobilebert_classifier': mobilebert_classifier_spec,
    'mobilebert_qa': mobilebert_qa_spec,
    'mobilebert_qa_squad': mobilebert_qa_squad_spec,
}

# List constants for supported models, grouped by task type.
IMAGE_CLASSIFICATION_MODELS = [
    'efficientnet_lite0', 'efficientnet_lite1', 'efficientnet_lite2',
    'efficientnet_lite3', 'efficientnet_lite4', 'mobilenet_v2', 'resnet_50'
]
TEXT_CLASSIFICATION_MODELS = [
    'bert_classifier', 'average_word_vec', 'mobilebert_classifier'
]
QUESTION_ANSWERING_MODELS = ['bert_qa', 'mobilebert_qa', 'mobilebert_qa_squad']
def get(spec_or_str):
  """Resolves a model spec from a registry name, class, or instance."""
  # A string is looked up in the registry; anything else is used as given.
  spec = MODEL_SPECS[spec_or_str] if isinstance(spec_or_str, str) else spec_or_str
  # Registry entries may be classes (instantiated with defaults) or
  # ready-made spec instances.
  return spec() if inspect.isclass(spec) else spec
| 37.122124 | 93 | 0.689139 |
ace65dd5798da5e38b51139b6c63fc48a4a4052b | 3,678 | py | Python | G2Exception.py | Senzing/g2exception | 3c0be612944613b31f76cc0dc6df7829ecb1c9af | [
"Apache-2.0"
] | 1 | 2021-08-06T14:25:55.000Z | 2021-08-06T14:25:55.000Z | g2/python/G2Exception.py | Senzing/g2-python | 949263cd3b5a9763ba42b4bb2c6686843ee4f732 | [
"Apache-2.0"
] | 29 | 2021-06-16T01:45:20.000Z | 2022-03-30T18:33:42.000Z | g2/python/G2Exception.py | Senzing/g2-python | 949263cd3b5a9763ba42b4bb2c6686843ee4f732 | [
"Apache-2.0"
] | 2 | 2021-06-08T13:12:56.000Z | 2021-07-03T15:47:59.000Z |
class G2Exception(Exception):
    '''Base exception for G2 related python code'''


class G2UnsupportedFileTypeException(G2Exception):
    '''Raised for file types G2 does not support.'''


class G2InvalidFileTypeContentsException(G2Exception):
    '''Raised when a file's contents do not match its type.'''


class G2DBException(G2Exception):
    '''Base exception for G2 DB related python code'''


class UnconfiguredDataSourceException(G2Exception):
    '''Raised when a record references a data source that is not configured.'''

    def __init__(self, DataSourceName):
        # Keep the original guidance message, including the help-article link.
        super().__init__(("Datasource %s not configured. See https://senzing.zendesk.com/hc/en-us/articles/360010784333 on how to configure datasources in the config file." % DataSourceName))


class G2DBUnknownException(G2DBException):
    '''Raised for database errors with no more specific classification.'''


class G2UnsupportedDatabaseType(G2DBException):
    '''Raised when the configured database type is not supported.'''


class G2TableNoExist(G2DBException):
    '''Raised when an expected database table does not exist.'''


class G2DBMNotStarted(G2DBException):
    '''Raised when the database manager has not been started.'''


class G2DBNotFound(G2DBException):
    '''Raised when the database cannot be found.'''


class G2DBUniqueConstraintViolation(G2DBException):
    '''Raised when a database unique constraint is violated.'''


class G2ModuleException(G2Exception):
    '''Base exception for G2 Module related python code'''


class G2ModuleNotInitialized(G2ModuleException):
    '''G2 Module called but has not been initialized '''


class G2ModuleGenericException(G2ModuleException):
    '''Generic exception for non-subclassed G2 Module exception '''


class G2ModuleMySQLNoSchema(G2ModuleException):
    '''Raised when the MySQL database has no G2 schema (engine code 7213E).'''


class G2ModuleEmptyMessage(G2ModuleException):
    '''Raised for an empty message from the engine (engine code 0007E).'''


class G2ModuleInvalidXML(G2ModuleException):
    '''Raised for invalid XML input (engine code 0002E).'''


class G2ModuleResolveMissingResEnt(G2ModuleException):
    '''Raised when resolve references a missing resolved entity (2134E).'''


class G2ModuleLicenseException(G2ModuleException):
    '''Raised for licensing errors (engine code 9000E).'''


# Maps the leading engine error code (text before the first '|') to the
# specific exception class to raise.
_ERROR_CODE_TO_EXCEPTION = {
    '7213E': G2ModuleMySQLNoSchema,
    '0002E': G2ModuleInvalidXML,
    '0007E': G2ModuleEmptyMessage,
    '2134E': G2ModuleResolveMissingResEnt,
    '9000E': G2ModuleLicenseException,
}


def TranslateG2ModuleException(ex):
    '''Translates a raw engine error (bytes) into a G2ModuleException subclass.

    Args:
        ex: bytes, the raw error message from the engine, formatted as
            "<code>|<detail>".

    Returns:
        An instance of the exception class matching the error code, or
        G2ModuleGenericException for unrecognized codes. The message is always
        the decoded text (previously the 7213E branch leaked raw bytes).
    '''
    message = ex.decode()
    code = message.split('|', 1)[0]
    exception_class = _ERROR_CODE_TO_EXCEPTION.get(code, G2ModuleGenericException)
    return exception_class(message)
| 37.151515 | 201 | 0.703915 |
ace65ddda8dcef3a83540bdd973f9523bfecc668 | 2,960 | py | Python | fairseq/fairseq/tasks/__init__.py | insop/pytorch-hackathon | 25290663cf9e4a2c99b9864f0331667b82a0c8b2 | [
"Apache-2.0"
] | 7 | 2019-08-10T03:55:19.000Z | 2020-12-31T08:50:47.000Z | fairseq/fairseq/tasks/__init__.py | insop/pytorch-hackathon | 25290663cf9e4a2c99b9864f0331667b82a0c8b2 | [
"Apache-2.0"
] | null | null | null | fairseq/fairseq/tasks/__init__.py | insop/pytorch-hackathon | 25290663cf9e4a2c99b9864f0331667b82a0c8b2 | [
"Apache-2.0"
] | 2 | 2019-08-11T19:14:17.000Z | 2020-07-24T11:30:44.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import importlib
import os
from .fairseq_task import FairseqTask
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
import sys
def import_user_module(module_path):
    """Imports a user-supplied package directory so its registrations run.

    Args:
        module_path: Filesystem path to a Python package directory, or None to
            do nothing. The package's parent directory is temporarily
            prepended to ``sys.path`` so the package can be imported by name;
            the path entry is removed again after the import.
    """
    # (Leftover debug print and commented-out code removed.)
    if module_path is not None:
        module_path = os.path.abspath(module_path)
        module_parent, module_name = os.path.split(module_path)
        # Skip the import if a module with this name is already loaded.
        if module_name not in sys.modules:
            sys.path.insert(0, module_parent)
            importlib.import_module(module_name)
            sys.path.pop(0)
def setup_task(args, **kwargs):
    """Instantiates the task named by ``args.task`` from the registry.

    Any user task directory must be registered (e.g. via
    ``import_user_module`` / the ``--user-dir`` flag) by the caller before
    this is invoked, so third-party tasks appear in ``TASK_REGISTRY``.
    (A hard-coded, machine-specific ``import_user_module('/home/...')`` call
    and a debug print of the registry were removed here.)
    """
    return TASK_REGISTRY[args.task].setup_task(args, **kwargs)
def register_task(name):
    """Decorator that adds a new task to the fairseq task registry.

    Example::

        @register_task('classification')
        class ClassificationTask(FairseqTask):
            (...)

    Registered classes must subclass :class:`~fairseq.tasks.FairseqTask`.

    Args:
        name (str): the name under which the task is registered
    """

    def _register(cls):
        # Reject duplicate task names, non-FairseqTask classes and duplicate
        # class names, in that order.
        if name in TASK_REGISTRY:
            raise ValueError('Cannot register duplicate task ({})'.format(name))
        if not issubclass(cls, FairseqTask):
            raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__))
        if cls.__name__ in TASK_CLASS_NAMES:
            raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__))
        TASK_REGISTRY[name] = cls
        TASK_CLASS_NAMES.add(cls.__name__)
        return cls

    return _register
# automatically import any Python files in the tasks/ directory, so that
# every task module registers itself via @register_task on package import.
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith('.py') and not file.startswith('_'):
        task_name = file[:file.find('.py')]
        importlib.import_module('fairseq.tasks.' + task_name)

        # expose `task_parser` for sphinx: build a parser carrying the task's
        # command-line arguments and publish it as `<task_name>_parser`.
        if task_name in TASK_REGISTRY:
            parser = argparse.ArgumentParser(add_help=False)
            group_task = parser.add_argument_group('Task name')
            # fmt: off
            group_task.add_argument('--task', metavar=task_name,
                                    help='Enable this task with: ``--task=' + task_name + '``')
            # fmt: on
            group_args = parser.add_argument_group('Additional command-line arguments')
            TASK_REGISTRY[task_name].add_args(group_args)
            globals()[task_name + '_parser'] = parser
def get_task(name):
    """Returns the task class registered under `name` (KeyError if absent)."""
    return TASK_REGISTRY[name]
| 32.173913 | 104 | 0.659797 |
ace65ef7d25a366a81a6d4687edd08b02bc1389d | 5,267 | py | Python | experimental_data/calc_pKa_value_statistics.py | choderalab/sampl6-logD-compound-selection | b14d82e8d3dd1091baeeff62b3fe6f093c03e321 | [
"MIT"
] | 3 | 2020-07-13T23:48:50.000Z | 2022-02-28T23:29:39.000Z | experimental_data/calc_pKa_value_statistics.py | choderalab/sampl6-logD-compound-selection | b14d82e8d3dd1091baeeff62b3fe6f093c03e321 | [
"MIT"
] | 1 | 2018-02-16T16:24:59.000Z | 2018-02-16T17:09:09.000Z | experimental_data/calc_pKa_value_statistics.py | choderalab/sampl6-logD-compound-selection | b14d82e8d3dd1091baeeff62b3fe6f093c03e321 | [
"MIT"
] | 4 | 2019-06-28T14:11:22.000Z | 2022-01-06T13:22:16.000Z | # Calculating Uncertainties in Experimental pKas
# Mehtap Isik, 2018/01/25
#
# Usage: python calc_pKa_value_statistics.py
import pandas as pd
import numpy as np
from scipy import stats
import math
def reduce_to_first_significant_digit(quantity, uncertainty):
    """Rounds quantity and uncertainty to the uncertainty's first significant digit.

    Returns a (quantity, uncertainty) tuple, both rounded to the decimal
    position of the uncertainty's leading significant digit.
    """
    # Decimal exponent of the uncertainty's leading significant digit.
    exponent = math.floor(math.log10(abs(uncertainty)))
    ndigits = -exponent
    return round(quantity, ndigits), round(uncertainty, ndigits)
# Input experimental data and output csv file
path_to_experimental_results = "pKa_results_of_replicate_experiments.csv"
path_to_experimental_pKa_values = "pKa_experimental_values.csv"

# Read experimental results with 3 replicate measurements
df_exp_results = pd.read_csv(path_to_experimental_results)

# Create new dataframe to store pKa value statistics; column order here also
# defines the order of values in the Series built per molecule below.
df_exp_pKa = pd.DataFrame()
df_exp_pKa["Molecule ID"] = np.NaN
df_exp_pKa["pKa1 mean"] = np.NaN
df_exp_pKa["pKa1 SEM"] = np.NaN
df_exp_pKa["pKa2 mean"] = np.NaN
df_exp_pKa["pKa2 SEM"] = np.NaN
df_exp_pKa["pKa3 mean"] = np.NaN
df_exp_pKa["pKa3 SEM"] = np.NaN
df_exp_pKa["Assay Type"] = np.NaN
df_exp_pKa["Experimental Molecule ID"] = np.NaN
df_exp_pKa["canonical isomeric SMILES"] = np.NaN

# Iterate over every 3rd experiment to get molecule IDs (the input file lists
# 3 consecutive replicate rows per molecule).
index_range = np.arange(0,df_exp_results.shape[0],3,dtype=int)
for i in index_range:
    molecule_ID = df_exp_results.loc[i,"Molecule ID"]
    assay_type = df_exp_results.loc[i,"Assay Type"]
    exp_molecule_ID = df_exp_results.loc[i,"Experimental Molecule ID"]
    smiles = df_exp_results.loc[i,"canonical isomeric SMILES"]
    # NOTE(review): DataFrame.append is deprecated in recent pandas -- this
    # script predates that; confirm the pinned pandas version before updating.
    s = pd.Series([molecule_ID, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, assay_type, exp_molecule_ID, smiles], index = df_exp_pKa.columns)
    df_exp_pKa = df_exp_pKa.append(s, ignore_index=True)
# Calculate mean and SEM for pKa values of each molecule
for i, row in enumerate(df_exp_pKa.iterrows()):
    molecule_ID = row[1]["Molecule ID"]
    pKa1_SEM = np.NaN
    pKa2_SEM = np.NaN
    pKa3_SEM = np.NaN
    # Parse pKa values of each replicate experiment for each molecule ID
    df_exp_result = df_exp_results.loc[df_exp_results["Molecule ID"] == molecule_ID]
    pKa1_array = df_exp_result["pKa1"]
    pKa2_array = df_exp_result["pKa2"]
    pKa3_array = df_exp_result["pKa3"]
    # Calculate mean of 3 replicates, reported to 2 decimal places via
    # format(a, '.2f'); a fully-missing pKa column yields float('nan').
    pKa1_mean = float(format(np.mean(pKa1_array), '.2f'))
    pKa2_mean = float(format(np.mean(pKa2_array), '.2f'))
    pKa3_mean = float(format(np.mean(pKa3_array), '.2f'))
    #pKa2_mean = np.mean(pKa2_array)
    #pKa3_mean = np.mean(pKa3_array)
    # Calculate standard error of the mean (SEM)
    # ddof=0 provides a maximum likelihood estimate of the variance for normally distributed variables
    pKa1_SEM = stats.sem(pKa1_array, ddof = 0)
    pKa2_SEM = stats.sem(pKa2_array, ddof = 0)
    pKa3_SEM = stats.sem(pKa3_array, ddof = 0)
    #print(molecule_ID,pKa1_SEM)
    # Reduce SEM values to 1st significant digit
    # Since pKa experimental data was reported in 2 decimal points,
    # SEM will be reported as 0.01 if calculated SEM value from 3 replicates is lower than 0.01.
    minimum_SEM = float(0.01)
    if pKa1_SEM == 0:
        pKa1_SEM = minimum_SEM
    elif (np.isnan(pKa1_SEM) == False):
        pKa1_SEM = max(minimum_SEM, reduce_to_first_significant_digit(pKa1_mean, pKa1_SEM)[1])
    if pKa2_SEM == 0:
        pKa2_SEM = minimum_SEM
    elif np.isnan(pKa2_SEM) == False:
        pKa2_SEM = max(minimum_SEM, reduce_to_first_significant_digit(pKa2_mean, pKa2_SEM)[1])
    if pKa3_SEM == 0:
        pKa3_SEM = minimum_SEM
    elif np.isnan(pKa3_SEM) == False:
        pKa3_SEM = max(minimum_SEM, reduce_to_first_significant_digit(pKa3_mean, pKa3_SEM)[1])
    # Write mean and SEM values to df_exp_pKa dataframe as 2-decimal strings;
    # missing values become the literal string "nan" (blanked out below).
    df_exp_pKa.loc[i, "pKa1 mean"] = str(format(pKa1_mean, '.2f'))
    df_exp_pKa.loc[i, "pKa2 mean"] = str(format(pKa2_mean, '.2f'))
    df_exp_pKa.loc[i, "pKa3 mean"] = str(format(pKa3_mean, '.2f'))
    df_exp_pKa.loc[i, "pKa1 SEM"] = str(format(pKa1_SEM, '.2f'))
    df_exp_pKa.loc[i, "pKa2 SEM"] = str(format(pKa2_SEM, '.2f'))
    df_exp_pKa.loc[i, "pKa3 SEM"] = str(format(pKa3_SEM, '.2f'))
# Replace "nan" strings with empty cells in the dataframe.
for i,row in enumerate(df_exp_pKa.iterrows()):
pKa1_mean = row[1]["pKa1 mean"]
pKa1_SEM = row[1]["pKa1 SEM"]
pKa2_mean = row[1]["pKa2 mean"]
pKa2_SEM = row[1]["pKa2 SEM"]
pKa3_mean = row[1]["pKa3 mean"]
pKa3_SEM = row[1]["pKa3 SEM"]
if pKa1_mean == "nan":
pKa1_mean = ""
if pKa1_SEM == "nan":
pKa1_SEM = ""
if pKa2_mean == "nan":
pKa2_mean = ""
if pKa2_SEM == "nan":
pKa2_SEM = ""
if pKa3_mean == "nan":
pKa3_mean = ""
if pKa3_SEM == "nan":
pKa3_SEM = ""
df_exp_pKa.loc[i, "pKa1 mean"] = pKa1_mean
df_exp_pKa.loc[i, "pKa1 SEM"] = pKa1_SEM
df_exp_pKa.loc[i, "pKa2 mean"] = pKa2_mean
df_exp_pKa.loc[i, "pKa2 SEM"] = pKa2_SEM
df_exp_pKa.loc[i, "pKa3 mean"] = pKa3_mean
df_exp_pKa.loc[i, "pKa3 SEM"] = pKa3_SEM
# Save pKa mean and SEM values in a CSV file.
df_exp_pKa.to_csv(path_to_experimental_pKa_values, index=False)
print("Done.")
| 37.091549 | 145 | 0.692045 |
ace65f3310fdec7927e185374436d2f4654c2b8f | 169,412 | py | Python | uvotpy/uvotio.py | zexixing/UVOTPY-for-asteroids | e44772ebb651ff497dc8e5bec0735579ce6b55ce | [
"BSD-3-Clause"
] | null | null | null | uvotpy/uvotio.py | zexixing/UVOTPY-for-asteroids | e44772ebb651ff497dc8e5bec0735579ce6b55ce | [
"BSD-3-Clause"
] | null | null | null | uvotpy/uvotio.py | zexixing/UVOTPY-for-asteroids | e44772ebb651ff497dc8e5bec0735579ce6b55ce | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: iso-8859-15 -*-
#
# This software was written by N.P.M. Kuin (Paul Kuin)
# Copyright N.P.M. Kuin
# All rights reserved
# This software is licenced under a 3-clause BSD style license
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#Redistributions of source code must retain the above copyright notice,
#this list of conditions and the following disclaimer.
#
#Redistributions in binary form must reproduce the above copyright notice,
#this list of conditions and the following disclaimer in the documentation
#and/or other materials provided with the distribution.
#
#Neither the name of the University College London nor the names
#of the code contributors may be used to endorse or promote products
#derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
#CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
#OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
#WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
#ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from future.builtins import input
from future.builtins import str
from future.builtins import range
__version__ = "1.7.0"
# version 1.0 9 Nov 2009
# version 1.1 21 Jan 2010 : adjust range for V grism
# version 1.2 1 Nov 2011 : update the LSF in the RMF file (slow)
# version 1.3 dec 2011: rewrite of writeOutput(), added rate2flux(), write_response_arf(),
# SpecResp(), write_rmf_file(), and helper writeOutput_()
# version 1.4 Sep 2012: added support for coi loss correction, anchor dependence of response curve XYSpecResp
# version 1.5 Mar 5, 2013
# version 1.5.1 April 11, 2013 fixed argument call XYspecresp
# version 1.5.2 October 29, 2013 added 'f' or 'g' to output filenames indicates
# lenticular filters or uvotgraspcorr used for anchor
# pyfits -> fits
# version 1.5.3 January 1, 2014 added aperture correction to background
# update of write spectrum: add code for a dictionary to be passed
# add code for xspec output with coi-correction done (background-corrected,
# coi-corrected rates, and errors)
# changed error computation, aperture corrected, and assume background error negligible
# version 1.5.4 February 27, 2014 updated effective area files, updated write_rmf_file
# version 1.5.5 May 1, 2014, update of coi-computation
# version 1.5.6 June 3, 2014, use fixed coi-area width
# version 1.5.7 July 23, 2014, use coi-box and factor as calibrated
# changed rate2flux api to pass boolean for points not too bright
# version 1.5.8 November 2014, updated fits "new_table" to "BinTableHDU.from_columns" This breaks probably
# for the old pyfits and older astropy versions, but "new_table" will be discontinued soon.
# Jan 2015 fixed a typo (bracket) in write_rmf_file
# version 1.6.0 March 11 2016, update all the fits header.update statements to revised standard astropy
# version 1.7.0 December 30, 2017, update to the sensitivity correction (provisional) affecting the 1700-3000A
'''
try:
from uvotpy import uvotplot,uvotmisc,uvotwcs,rationalfit,mpfit,uvotgetspec
except:
pass
'''
import uvotgetspec
# shorthand for NoneType, used in "type(x) != typeNone" guards below
typeNone = type(None)
# mirror the interactive (plotting) switch from uvotgetspec at import time
interactive=uvotgetspec.interactive
def get_uvot_observation(coordinate=None,name=None,obsid=None,chatter=0):
   """Retrieve UVOT data from the archive (placeholder).

   This routine is still under development and currently does nothing;
   it always returns None.
   """
   return None
def rate2flux(wave, rate, wheelpos,
              bkgrate=None, pixno=None,
              co_sprate = None,
              co_bgrate = None,
              arf1=None, arf2=None,
              effarea1=None, effarea2=None,
              spectralorder=1,
              #trackwidth = 1.0, obsoleted uvotpy version 2.0.2
              anker=None, test=None,
              msg = "",
              respfunc=False,
              swifttime=None, option=1,
              fudgespec=1.,
              frametime=0.0110329,
              #sig1coef=[3.2], sigma1_limits=[2.6,4.0], obsoleted uvotpy version 2.0.2
              debug=False, chatter=1):
   '''
   Convert net count rate to flux

   WARNING: dependent on the parameters passed, the old CALDB (<=2012)
   OR the new flux calibration will be used.  Since 10SEP2012 the
   coi-factor is included in the calculation of the flux and the
   effective area. A coi-correction is still made when using the old
   CALDB which will be inconsistent to that calculated in the
   writeSpectrum() which makes the output file.

   many of the parameters are needed to calculate the coi-factor

   Parameters
   ----------
   wave : float ndarray
        wavelength in A

   rate, bkgrate : float ndarray
        net and background count rate/bin in spectrum, aperture corrected

   co_sprate, co_bgrate : ndarray
        total of spectrum+background and background rate/bin for the
        coincidence area of constant width (default set to 16 pixels)

   wheelpos : int
        filter wheel position

   pixno : ndarray
        pixel coordinate (zero = anchor; + increasing wavelengths)

   arf1, arf2 : path or "CALDB", optional

   effarea1, effarea2 : FITS HDU[, interpolating function]
        result from a previous call to readFluxCalFile() for first or second order

   spectralorder : int
        the spectral order of the spectrum, usually =1

   anker : list
        anchor detector coordinate positions (pix) as a 2-element numpy array

   frametime : float
        the frame time for the image is required for the coi-correction

   swifttime : int
        swift time of observation in seconds for calculating the sensitivity loss

   debug : bool
        for development

   chatter : int
        verbosity (0..5)

   respfunc : bool
        return the response function (used by writeSpectrum())

   Returns
   -------
   (flux, wave, coi_valid) : tuple
        coi-corrected flux type interp1d,
        array wave, and matching boolean array for points not too bright
        for coincidence loss correction

   Notes
   -----
   2013-05-05 NPMKuin - adding support for new flux calibration files; new kwarg
   2014-02-28 fixed. applying fnorm now to get specrespfunc, pass earlier effective area
   2014-04-30 NPMK changed coi_func parameters (option=1,fudgespec=1.322,frametime,coi_length=29)
   '''
   import numpy as np
   from uvotgetspec import coi_func
   from scipy import interpolate

   __version__ = '130505'

   # photon energy h*c/lambda in erg (lambda in Angstrom)
   h_planck = 6.626e-27 # erg/s
   lightspeed = 2.9979e10 # cm/sec
   h_c_ang = h_planck * lightspeed * 1e8 # ergs-Angstrom
   hnu = h_c_ang/(wave)

   # assume uv grism
   if pixno is None:
      # reconstruct pixel coordinates relative to the anchor;
      # first order anchor sits at index 400 of the extracted track
      dis = np.arange(len(wave)) - 400 # see uvotgetspec.curved_extraction()
      if spectralorder == 2: dis -= 260
   else:
      dis = pixno
   # dispersion: fit wave(dis) with a quartic and evaluate the width
   # of each pixel bin in Angstrom
   coef = np.polyfit(dis,wave,4)
   binwidth = np.polyval(coef,dis+0.5) - np.polyval(coef,dis-0.5) # width of each bin in A (= scale A/pix)

   # --- select / build the spectral response (effective area) function ---
   # branch 1: no pre-loaded effective area HDU was passed in
   if ((spectralorder == 1) & (effarea1 == None)) | ((spectralorder == 2) & (effarea2 == None)) :
      print('test:',test==None)
      print('anker:',anker==None)
      if (test != None) & (anker.size):#(anker != None):
         # attempt to use the new spectral response (if returns None then not available)
         z = readFluxCalFile(wheelpos,anchor=anker,spectralorder=spectralorder, msg=msg, chatter=chatter)
         if (z == None):
            # fall back to the old anchor-dependent calibration
            specrespfunc = XYSpecResp(wheelpos=wheelpos, \
                spectralorder=spectralorder, anker=anker, test=test,chatter=chatter)
         else:
            # ancher given, default
            hdu,fnorm, msg = z
            # wavelength bin centres and (normalised) effective area;
            # reversed so wavelength runs ascending for interp1d
            w = list(0.5*(hdu.data['WAVE_MIN']+hdu.data['WAVE_MAX']))
            r = list(( hdu.data['SPECRESP'] )*fnorm(w))
            w.reverse() ; r.reverse()
            specrespfunc = interpolate.interp1d( w, r, bounds_error=False, fill_value=np.NaN)
      #elif ((anker == None) | ((arf1 != None) & (spectralorder == 1))| ((arf2 != None) & (spectralorder == 2))):
      elif ((not anker.size) | ((arf1 != None) & (spectralorder == 1))| ((arf2 != None) & (spectralorder == 2))):
         # NOTE(review): xopt is only defined when spectralorder == 2; for
         # order 1 the next call raises NameError — confirm this branch is
         # only ever reached for second order, or define a default xopt.
         if spectralorder == 2: xopt='nearest'
         ans = readFluxCalFile(wheelpos,spectralorder=spectralorder,msg=msg, chatter=chatter, option=xopt)
         print(type(ans))
         print(ans)
         if (ans[0] == None):
            # no new calibration file: use the (old) ARF-based response
            specrespfunc = SpecResp(wheelpos, spectralorder, arf1=arf1,arf2=arf2,)
         else:
            hdu, msg = ans
            w = list(0.5*(hdu.data['WAVE_MIN']+hdu.data['WAVE_MAX']))
            r = list(hdu.data['SPECRESP'] )
            w.reverse() ; r.reverse()
            specrespfunc = interpolate.interp1d( w, r, bounds_error=False, fill_value=np.NaN)
      else:
         # attempt to use the new spectral response (if returns None then not available)
         if (effarea1 != None) & (spectralorder == 1):
            z = effarea1
         elif (effarea2 != None) & (spectralorder == 2):
            z = effarea2
         else:
            z = readFluxCalFile(wheelpos,anchor=anker,spectralorder=spectralorder,msg=msg, chatter=chatter) #~~FIXME:
         if (z == None):
            print("uvotio.rate2flux warning: fall back to XYSpecResp call ")
            specrespfunc = XYSpecResp(wheelpos=wheelpos, spectralorder=spectralorder, anker=anker,chatter=chatter)
         else:
            #HERE!(ZEXI)
            # ancher given, default
            hdu,fnorm,msg = z
            w = list(0.5*(hdu.data['WAVE_MIN']+hdu.data['WAVE_MAX']) )
            r = list(hdu.data['SPECRESP'])
            w.reverse()
            r.reverse()
            # apply the anchor-position normalisation to the response
            r = np.array(r) * fnorm(w)
            specrespfunc = interpolate.interp1d( w, r, bounds_error=False, fill_value=np.NaN )
   # branch 2: a pre-loaded first order effective area was passed in
   elif ((spectralorder == 1) & (effarea1 != None)):
      if len(effarea1) == 2:
         # (HDU, normalisation function) pair
         hdu,fnorm = effarea1
         w = 0.5*(hdu.data['WAVE_MIN']+hdu.data['WAVE_MAX'])
         r = hdu.data['SPECRESP']
         ii = list(range(len(w)-1,-1,-1))
         r = r * fnorm(w)
         specrespfunc = interpolate.interp1d( w[ii], r[ii], bounds_error=False, fill_value=0)#np.NaN )
      else:
         # bare HDU, no normalisation
         hdu = effarea1
         w = 0.5*(hdu.data['WAVE_MIN']+hdu.data['WAVE_MAX'])
         r = hdu.data['SPECRESP']
         ii = list(range(len(w)-1,-1,-1))
         specrespfunc = interpolate.interp1d( w[ii], r[ii], bounds_error=False, fill_value=np.NaN )
   # branch 3: a pre-loaded second order effective area was passed in
   elif ((spectralorder == 2) & (effarea2 != None)): #this is under development only
      print("second order Effective area is under development - not for science use")
      if len(effarea2) == 2:
         hdu,fnorm = effarea2
         w = 0.5*(hdu.data['WAVE_MIN']+hdu.data['WAVE_MAX'])
         r = hdu.data['SPECRESP']
         ii = list(range(len(w)-1,-1,-1))
         r = r * fnorm(w)
         specrespfunc = interpolate.interp1d( w[ii], r[ii], bounds_error=False, fill_value=np.NaN )
      else:
         hdu = effarea2[0]
         w = 0.5*(hdu.data['WAVE_MIN']+hdu.data['WAVE_MAX'])
         r = hdu.data['SPECRESP']
         ii = list(range(len(w)-1,-1,-1))
         specrespfunc = interpolate.interp1d( w[ii], r[ii], bounds_error=False, fill_value=np.NaN )
   else: return None

   if respfunc: return specrespfunc # feed for writeSpectrum()

   #w = np.array(w)
   #wave = wave[wave<np.max(w)]
   #wave = wave[wave>np.min(w)]
   #print(wave)
   # --- sensitivity loss correction (instrument degradation over time) ---
   if swifttime != None:
      senscorr = sensitivityCorrection(swifttime,wave=wave,wheelpos=wheelpos)
      print("Sensitivity correction factor for degradation over time = ", senscorr)
      msg += "Sensitivity correction factor for degradation over time = %s\n"%( senscorr)
   else:
      senscorr = 1.0
      print("NO Sensitivity correction applied")
      msg += "NO Sensitivity correction applied\n"

   # --- coincidence loss correction and flux conversion ---
   if ((type(bkgrate) != typeNone) & (type(pixno) != typeNone)):
      if chatter > 0: print("performing the COI correction ")
      # do the coi-correction
      fcoi, coi_valid = uvotgetspec.coi_func(pixno,wave,
              co_sprate,
              co_bgrate,
              wheelpos = wheelpos,
              fudgespec=fudgespec,
              frametime=frametime,
              background=False,
              debug=False,chatter=1)
      bgcoi = uvotgetspec.coi_func(pixno,wave,
              co_sprate,
              co_bgrate,
              wheelpos = wheelpos,
              fudgespec=fudgespec,
              frametime=frametime,
              background=True, \
              debug=False,chatter=1)
      netrate = rate*fcoi(wave)
      # flux = photon energy * corrected rate / effective area / bin width
      flux = hnu*netrate*senscorr/specrespfunc(wave)/binwidth # [erg/s/cm2/angstrom]
   else:
      if chatter > 0:
         print("WARNING rate2flux: Flux calculated without a COI-correction for spectral order = ",spectralorder)
      # no coi correction
      flux = hnu*rate*senscorr/specrespfunc(wave)/binwidth # [erg/s/cm2/angstrom]
      coi_valid = np.ones(len(wave),dtype=bool)
   return (flux, wave, coi_valid)
def sensitivityCorrection(swifttime,wave=None,sens_rate=0.01,wheelpos=0):
   '''
   give the sensitivity correction factor

   Actual flux = observed flux(date-obs) times the sensitivity correction

   Parameters
   ----------
   swifttime : float
      time of observation since 2005-01-01 00:00:00 in seconds, usually TSTART
   wave : array, optional
      the wavelengths (A); when given together with wheelpos=160 an
      additional empirical wavelength-dependent correction is applied
      in the 1700-3000A range
   sens_rate : float
      the yearly percentage loss in sensitivity
   wheelpos : int
      filter wheel position; only 160 triggers the extra UV correction

   Returns
   -------
   The multiplicative sensitivity correction: a scalar when wave is None,
   otherwise an array matching wave.

   Notes
   -----
   A 1%/year decay rate since 2005-01-01 has been assumed and
   the length of the mean Gregorian year was used
   '''
   from scipy.interpolate import interp1d
   import numpy as np
   # global switch added for sensitivity calibration activities 2015-06-30
   if not uvotgetspec.senscorr:
      return 1.0
   # linear-in-time loss: 126230400 s = 2005-01-01; 31556952 s = mean Gregorian year
   sens_corr = 1.0/(1.0 - sens_rate*(swifttime-126230400.000)/31556952.0 )
   if wave is not None and (wheelpos == 160):
      # The time-dependent scaling of the extra UV correction is disabled
      # for now (the original computed a scale and immediately overrode it).
      fscale = 1
      # empirical wavelength-dependent correction for the UV clocked grism
      # (provisional, affects 1700-3000A; see version 1.7.0 note above)
      extracorr = np.array(
      [[1.650e+03, 0.19],
       [ 1.68810484e+03, 1.93506494e-01],
       [ 1.70579637e+03, 3.42857143e-01],
       [ 1.72757056e+03, 5.52813853e-01],
       [ 1.75206653e+03, 6.93506494e-01],
       [ 1.79153226e+03, 7.93073593e-01],
       [ 1.82419355e+03, 8.58008658e-01],
       [ 1.89223790e+03, 9.05627706e-01],
       [ 1.96436492e+03, 9.55411255e-01],
       [ 2.04465726e+03, 9.98701299e-01],
       [ 2.10317540e+03, 1.00735931e+00],
       [ 2.17121976e+03, 1.02900433e+00],
       [ 2.25423387e+03, 1.04415584e+00],
       [ 2.32227823e+03, 1.05281385e+00],
       [ 2.42026210e+03, 1.08961039e+00],
       [ 2.52096774e+03, 1.09177489e+00],
       [ 2.60398185e+03, 1.09177489e+00],
       [ 2.72101815e+03, 1.09610390e+00],
       [ 2.77953629e+03, 1.07445887e+00],
       [ 2.83805444e+03, 1.07229437e+00],
       [ 2.90745968e+03, 1.05714286e+00],
       [ 2.96597782e+03, 1.05281385e+00],
       [ 2.99863911e+03, 1.02900433e+00],
       [3000,1.],[8000,1.]])
      # outside the tabulated range the correction factor is 1 (no change)
      f_extrasenscorr=interp1d(extracorr[:,0],extracorr[:,1],
           bounds_error=False,fill_value=1.0)
      sens_corr=sens_corr/(f_extrasenscorr(wave) * fscale)
      print ("\nsensitivityCorrection: applied additional changes in UV 1700-3000\n")
   if wave is None:
      # bug fix: the original returned sens_corr*wave/wave unconditionally,
      # which raised a TypeError when wave was left at its default None
      return sens_corr
   # broadcast the (possibly scalar) correction to the shape of wave
   return sens_corr*wave/wave
def angstrom2kev(lamb,unit='angstrom'):
   """
   conversion of units

   Parameter
   ---------
   lamb : array
      Input photon wavelength in angstrom

   Returns
   -------
   The photon energy in keV.
   """
   import numpy
   # h*c expressed in keV Angstrom
   hc_keV_angstrom = 12.3984191
   return hc_keV_angstrom / numpy.asarray(lamb)
def kev2angstrom(E,unit='keV'):
   """
   conversion of units

   Parameter
   ---------
   E : array
      Input photon energy in keV.

   Returns
   -------
   The photon wavelength in angstroms
   """
   import numpy
   # h*c expressed in keV Angstrom
   hc_keV_angstrom = 12.3984191
   return hc_keV_angstrom / numpy.asarray(E)
def fileinfo(filestub,ext,lfilt1=None, directory='./',chatter=0, wheelpos=None, twait=40.0):
   '''finds files for spectrum, matching attitude and lenticular images
      uncompresses gzipped files if found

   Parameters
   ----------
   filestub : str
      the base of the file name, with the Swift project convention,
      consisting of "sw" + the OBSID, i.e., "sw00032301001"
   ext : int
      the number of the extension of the grism file to match

   kwargs : dict

    - **lfilt1** : str, optional
      name of first lenticular filter used. Must be one of 'uvw2',
      'uvm2','uvw1','u','b','v','wh'
    - **directory** : path, str
      path for directory. This is the directory with the grism file.
    - **chatter** : int
      verbosity
    - **twait** : float
      The maximum time allowed between begin and end of matched
      exposures of grism-lenticular filter, for each match.
    - **wheelpos** : int
      If given, use to discriminate between UV and Visual grisms.

   Returns
   -------
   specfile, attfile: str
      filename of spectrum,
      the attitude file name.

   lfilt1, lfilt2 : str
      lenticular filter file name before the grism exposure or None,
      and the file name of the lenticular filter following the grism

   lfilt1_ext,lfilt2_ext : int
      extension number for each lenticular filter matching exposure

   '''
   import os
   try:
      from astropy.io import fits
   except:
      import pyfits as fits
   from numpy import array
   # sky image file suffixes and the corresponding lenticular filter names
   ext_names =array(['uw2','um2','uw1','uuu','ubb','uvv','uwh'])
   lfiltnames=array(['uvw2','uvm2','uvw1','u','b','v','wh'])
   vvgrism = True
   uvgrism = True
   # when wheelpos is known, restrict the search to the matching grism
   if wheelpos != None:
      if wheelpos < 500:
         vvgrism = False
         # NOTE(review): specfile is set here before the trailing '/' is
         # appended to directory below; it is overwritten by the search
         # loop in practice — confirm this early assignment is harmless.
         specfile = directory+filestub+'ugu_dt.img'
      else:
         specfile = directory+filestub+'ugv_dt.img'
         uvgrism = False
   if (not directory.endswith('/')) :
      directory += '/'
   # the auxil directory (attitude files) relative to the uvot image dir
   auxildir = directory+'../../auxil/'
   attfile = None
   # test if u or v grism file and set variable
   specfile = ' *filename not yet initialised (directory wrong?)* '
   # search order: plain file first, then gzipped (which is uncompressed
   # in place); UV grism before V grism
   if uvgrism & os.access(directory+filestub+'ugu_dt.img',os.F_OK):
      specfile = directory+filestub+'ugu_dt.img'
      if chatter > 1: print('reading ',specfile)
   elif uvgrism & os.access(directory+filestub+'ugu_dt.img.gz',os.F_OK):
      specfile = directory+filestub+'ugu_dt.img'
      os.system( 'gunzip '+specfile+'.gz' )
      if chatter > 1: print('reading ',specfile)
   elif vvgrism & os.access(directory+filestub+'ugv_dt.img',os.F_OK):
      specfile = directory+filestub+'ugv_dt.img'
      if chatter > 1: print('reading ',specfile)
   elif vvgrism & os.access(directory+filestub+'ugv_dt.img.gz',os.F_OK):
      specfile = directory+filestub+'ugv_dt.img'
      os.system( 'gunzip '+specfile+'.gz' )
      if chatter > 1: print('reading ',specfile)
   else:
      print("on call fileinfo(sw+obsid="+filestub+",ext=",ext,",lfilt1=",lfilt1,", directory="+directory,",wheelpos=",wheelpos,")")
      raise IOError("FILEINFO: cannot find %s: DET file not found - pls check directory/file provided is correct" % specfile )
   # attitude file:
   # preference order is pat > uat > sat, each first in the grism file's
   # directory, then (all three again) in the auxil directory
   if os.access(directory+filestub+'pat.fits',os.F_OK):
      attfile = directory+filestub+'pat.fits'
      if chatter > 1: print('found att file ',attfile)
   elif os.access(directory+filestub+'pat.fits.gz',os.F_OK):
      attfile = directory+filestub+'pat.fits'
      os.system( 'gunzip '+attfile+'.gz' )
      if chatter > 1: print('found att file ',attfile)
   elif os.access(directory+filestub+'uat.fits',os.F_OK):
      attfile = directory+filestub+'uat.fits'
      if chatter > 1: print('found att file ',attfile)
   elif os.access(directory+filestub+'uat.fits.gz',os.F_OK):
      attfile = directory+filestub+'uat.fits'
      os.system( 'gunzip '+attfile+'.gz' )
      if chatter > 1: print('found att file ',attfile)
   elif os.access(directory+filestub+'sat.fits',os.F_OK):
      attfile = directory+filestub+'sat.fits'
      if chatter > 1: print('found att file ',attfile)
   elif os.access(directory+filestub+'sat.fits.gz',os.F_OK):
      attfile = directory+filestub+'sat.fits'
      os.system( 'gunzip '+attfile+'.gz' )
      if chatter > 1: print('found att file ',attfile)
   elif os.access(auxildir+filestub+'pat.fits',os.F_OK):
      attfile = auxildir+filestub+'pat.fits'
      if chatter > 1: print('found att file ',attfile)
   elif os.access(auxildir+filestub+'pat.fits.gz',os.F_OK):
      attfile = auxildir+filestub+'pat.fits'
      os.system( 'gunzip '+attfile+'.gz' )
      if chatter > 1: print('found att file ',attfile)
   elif os.access(auxildir+filestub+'uat.fits',os.F_OK):
      attfile = auxildir+filestub+'uat.fits'
      if chatter > 1: print('found att file ',attfile)
   elif os.access(auxildir+filestub+'uat.fits.gz',os.F_OK):
      attfile = auxildir+filestub+'uat.fits'
      os.system( 'gunzip '+attfile+'.gz' )
      if chatter > 1: print('found att file ',attfile)
   elif os.access(auxildir+filestub+'sat.fits',os.F_OK):
      attfile = auxildir+filestub+'sat.fits'
      if chatter > 1: print('found att file ',attfile)
   elif os.access(auxildir+filestub+'sat.fits.gz',os.F_OK):
      attfile = auxildir+filestub+'sat.fits'
      os.system( 'gunzip '+attfile+'.gz' )
      if chatter > 1: print('found att file ',attfile)
   # filter file(s)
   lfilt1,lfilt2 = None,None
   lfilt1_ext = None; lfilt2_ext=None
   hdu = fits.open(specfile)
   if len(hdu)-1 < ext:
      raise IOError("Input error: extension not found in Grism file.")
   hdr = hdu[int(ext)].header
   hdu.close()
   #hdr = fits.getheader(specfile,int(ext))
   tstart = hdr['TSTART']
   tstop = hdr['TSTOP']
   if chatter > 1:
      print('grism times : %s - %s '%(tstart,tstop))
   lfile=None
   #
   # find lenticular filter exposures taken within twait seconds of the
   # grism exposure (before -> lfilt1, after -> lfilt2)
   for k in range(len(ext_names)):
      ftyp = ext_names[k]
      lfiltyp = lfiltnames[k]
      if chatter > 1: print("testting for "+directory+filestub+ftyp+'_sk.img')
      if os.access(directory+filestub+ftyp+'_sk.img',os.F_OK):
         lfile = directory+filestub+ftyp+'_sk.img'
         if chatter > 1:
            print('found lenticular sky file ',lfile)
      elif os.access(directory+filestub+ftyp+'_sk.img.gz',os.F_OK):
         lfile = directory+filestub+ftyp+'_sk.img'
         os.system( 'gunzip '+lfile+'.gz' )
         if chatter > 1: print('found lenticular sky file ',lfile)
      if lfile != None:
         # check if it has an extension before or after specfile[ext]
         xxx = fits.open(lfile)
         for i in range(1,len(xxx)):
            t1 = xxx[i].header['TSTART']
            t2 = xxx[i].header['TSTOP']
            if abs(t2-tstart) < twait:
               lfilt1 = lfiltyp
               lfilt1_ext = i
               if chatter > 0: print("lenticular file observation preceeding grism observation")
            if abs(t1-tstop) < twait:
               lfilt2 = lfiltyp
               lfilt2_ext = i
               if chatter > 1: print("lenticular file observation after grism observation")
         lfile = None
         xxx.close()
   # wrapup in case there is only one, but in lfilt2.
   if ((lfilt1 == None) & (lfilt2 != None)):
      if chatter > 2: print("putting only filter info in filter 1")
      lfilt1 = lfilt2
      lfilt2 = None
      lfilt1_ext = lfilt2_ext
      lfilt2_ext = None
   #
   if attfile == None:
      raise IOError("The attitude file could not be found.")
   return specfile, lfilt1, lfilt1_ext, lfilt2, lfilt2_ext, attfile
def writeEffAreaFile (wheelpos,spectralorder,wave,specresp,specresp_err=None,
        anker=None,dxy_anker=None,fileversion='999',todir="./",rebin=True,
        clobber=False):
   ''' create an ARF file

   Parameters
   ----------
   wheelpos : int, {160,200,955,1000}
   spectralorder: int, {1,2}
   wave: ndarray
        wavelengths in Angstrom
   specresp: ndarray
        effective area (EA) in cm^2 for each wave
   specresp_err: ndarray, optional
        1-sigma EA error (random + systematic); when None no error
        column is written
   anker: list, ndarray[2], optional
        2-element array with position in det coordinates of EA
   dxy_anker: list,ndarray[2], optional
        EA determined for box [anker[0]+/-dxy_anker[0], anker[1]+/-dxy_anker[1]]
   fileversion: str
        version for this EA (spectral response) file.
   todir: path
        directory to place the file into
   rebin : bool
        When True (old behaviour) bin 1 A in wavelength
        When False, make one bin for each point in array wave

   Returns
   -------
   the new effective area file with file name something like:
      'swugu0160_ax1100ay1100_dx150dy150_o1_20041120v001.arf'

   Raises
   ------
   ValueError
        when wheelpos or spectralorder is not one of the supported values

   Notes
   -----
   - Modified 15-SEP-2012 by Paul Kuin.
   With only wheelpos, spectralorder, wave, specresp input, the output file conforms to
   the HEASARC approved response file. The additional keywords and error column have not
   been approved as of 15 September 2012.
   - Modified 13 Feb 2013 by Paul Kuin
   Added futher keyword COIAWARE to discriminate between the old and new effective areas and
   changed comments after keywords to be more descriptive.
   - Modified 5 March 2013 by Paul Kuin
   header edited
   - Renamed 28 Dec 2013
   first extension assumed 1-spaced wavelengths. Relaxed to allow variable wavelengths.
   - changed to reflect use of full coi-solution 2014-08-20. Paul Kuin
   - added no rebinning as option. It actually will rebin slightly by calculating
   the minimum value of the bin from the distance of its neighbors, and the maximum
   value is chosen to have no gaps between bins.
   '''
   try:
      from astropy.io import fits
   except:
      import pyfits as fits
   import datetime
   import numpy as np
   from scipy import interpolate
   import os

   version = '20140820'
   now = datetime.date.today()
   datestring = now.isoformat()[0:4]+now.isoformat()[5:7]+now.isoformat()[8:10]

   #  file name elements:
   of1 = '_20041120v'+fileversion+'.arf'
   if spectralorder == 1:
      of0 = '_o1'
   elif spectralorder == 2:
      of0 = '_o2'
   else:
      raise ValueError("spectralorder must be 1 or 2, got %s"%(spectralorder,))
   # optional anchor position / range in the file name
   # (bug fix: the original tested anker.size which raises AttributeError
   #  for the default anker=None and for list input)
   of2 = ''
   if anker is not None:
      of2 = '_ax'+str(anker[0])+'ay'+str(anker[1])
   if dxy_anker is not None:
      of2 += '_dx'+str(dxy_anker[0])+'dy'+str(dxy_anker[1])

   # extension name, output file name and filter name per grism mode
   if wheelpos == 160:
      if spectralorder == 1:
         EXTNAME='SPECRESPUGRISM160'
      else:
         EXTNAME = 'SPECRESP0160GRISM2NDORDER'
      outfile = todir+'swugu0160'+of2+of0+of1
      filtername = 'UGRISM'
   elif wheelpos == 200:
      if spectralorder == 1:
         EXTNAME='SPECRESPUGRISM200'
      else:
         EXTNAME = 'SPECRESP0200GRISM2NDORDER'
      outfile = todir+'swugu0200'+of2+of0+of1
      filtername = 'UGRISM'
   elif wheelpos == 955:
      if spectralorder == 1:
         EXTNAME='SPECRESPVGRISM955'
      else:
         EXTNAME = 'SPECRESP0955GRISM2NDORDER'
      outfile = todir+'swugv0955'+of2+of0+of1
      filtername = 'VGRISM'
   elif wheelpos == 1000:
      if spectralorder == 1:
         EXTNAME='SPECRESPVGRISM1000'
      else:
         EXTNAME = 'SPECRESP1000GRISM2NDORDER'
      outfile = todir+'swugv1000'+of2+of0+of1
      filtername = 'VGRISM'
   else:
      raise ValueError("wheelpos must be one of 160,200,955,1000, got %s"%(wheelpos,))

   specrespfunc = interpolate.interp1d(wave, specresp, kind='linear', bounds_error=False,
        fill_value=0. )
   # only build an error interpolator when an error array was supplied
   # (bug fix: the original unconditionally called interp1d with None)
   if specresp_err is None:
      specresp_errfunc = None
   else:
      specresp_errfunc = interpolate.interp1d(wave, specresp_err, kind='linear',
           bounds_error=False, fill_value=0. )

   hdu = fits.PrimaryHDU()
   hdulist=fits.HDUList([hdu])
   # header cards must be set on the HDU *header*, not on the HDU itself
   # (bug fix: the original did hdulist[0]['TELESCOP']=... )
   hdulist[0].header['TELESCOP']=('SWIFT ','Telescope (mission) name')
   hdulist[0].header['INSTRUME']=('UVOTA ','Instrument Name')
   hdulist[0].header['COMMENT']='Grism Effective area'
   hdulist[0].header['CAL_REF']=('2015MNRAS.449.2514K','CDS Bibcode grism calibration')

   #  first extension SPECRESP
   if rebin:
      binwidth = 1.0 # scalar 1A binning
      ax = np.arange(int(min(wave)),int(max(wave)),binwidth)
      NW = len(ax)
      wavmin = (ax-0.5*binwidth)
      wavmax = (ax+0.5*binwidth)
   else:
      # one (variable width) bin per input wavelength; the lower edge sits
      # halfway to the neighbours, the upper edge closes the gap to the next bin
      NW = len(wave)
      binw = np.empty(NW,dtype=float)
      binw[1:-1] = 0.5*(wave[2:]-wave[:-2])
      wavmin = np.empty(NW,dtype=float)
      wavmin[1:-1] = wave[1:-1]-0.5*binw[1:-1]
      wavmin[0] = wave[0]-0.5*binw[1]
      wavmin[-1] = wave[-1]-0.5*binw[-2]
      wavmax = np.empty(NW,dtype=float)
      wavmax[:-1] = wavmin[1:]
      wavmax[-1] = wavmax[-2] + binw[-2]
      # note: if there is a mix of small and big steps in wave, then this scheme will
      # find bins with the center outside the bin. The result is a grid closer to regular.
   binwidth = wavmax-wavmin # array
   midwave = 0.5*(wavmax+wavmin)
   energy_lo = angstrom2kev(wavmax)
   energy_hi = angstrom2kev(wavmin)
   elow = energy_lo.min()
   ehigh = energy_hi.max()
   specresp = specrespfunc( midwave )
   if specresp_errfunc is not None:
      specresp_err = specresp_errfunc( midwave )

   # the table runs from high to low energy, so reverse the row order
   ix = list(range(NW))
   ix.reverse()
   col11 = fits.Column(name='ENERG_LO',format='E',array=energy_lo[ix],unit='KeV')
   col12 = fits.Column(name='ENERG_HI',format='E',array=energy_hi[ix],unit='KeV')
   col13 = fits.Column(name='WAVE_MIN',format='E',array=wavmin[ix],unit='angstrom')
   col14 = fits.Column(name='WAVE_MAX',format='E',array=wavmax[ix],unit='angstrom')
   col15 = fits.Column(name='SPECRESP',format='E',array=specresp[ix],unit='cm**2' )
   # bug fix: "specresp_err == None" is an elementwise ndarray comparison and
   # fails in a boolean context; an identity test is required here
   if specresp_err is None:
      cols1 = fits.ColDefs([col11,col12,col13,col14,col15])
   else:
      col16 = fits.Column(name='SPRS_ERR',format='E',array=specresp_err[ix],unit='cm**2' )
      cols1 = fits.ColDefs([col11,col12,col13,col14,col15,col16])

   tbhdu1 = fits.BinTableHDU.from_columns(cols1)
   tbhdu1.header['EXTNAME']=(EXTNAME,'Name of this binary table extension')
   tbhdu1.header['TELESCOP']=('Swift','Telescope (mission) name')
   tbhdu1.header['INSTRUME']=('UVOTA','Instrument name')
   tbhdu1.header['FILTER']=filtername
   tbhdu1.header['ORIGIN']=('UCL/MSSL','source of FITS file')
   tbhdu1.header['CREATOR']=('uvotio.py','uvotpy python library')
   tbhdu1.header['COMMENT']=('uvotpy sources at www.github.com/PaulKuin/uvotpy')
   tbhdu1.header['VERSION']=fileversion
   tbhdu1.header['FILENAME']=(outfile,'file NAME')
   tbhdu1.header['HDUCLASS']=('OGIP','format conforms to OGIP standard')
   tbhdu1.header['HDUCLAS1']=('RESPONSE','RESPONSE DATA')
   tbhdu1.header['HDUCLAS2']=('SPECRESP','type of calibration data')
   tbhdu1.header['CCLS0001']=('CPF','dataset is a calibration product file')
   tbhdu1.header['CCNM0001']=('SPECRESP','Type of calibration data')
   tbhdu1.header['CDES0001']=(filtername+' SPECTRAL RESPONSE','Description')
   tbhdu1.header['CDTP0001']=('DATA','Calibration file contains data')
   tbhdu1.header['CVSD0001']=('2004-11-20','UTC date when calibration should first be used')
   tbhdu1.header['CVST0001']=('00:00:00','UTC time when calibration should first be used')
   tbhdu1.header['CBD10001']=('FILTER('+filtername+')','Parameter boundary')
   tbhdu1.header['CBD20001']=('ENERG('+str(elow)+'-'+str(ehigh)+')keV','spectral range')
   tbhdu1.header['CBD30001']=('RADIUS(0-10)pixel','Parameter boundary')
   tbhdu1.header['CBD40001']=('THETA(0-17)arcmin','Parameter boundary')
   tbhdu1.header['CBD50001']=('WHEELPOS('+str(wheelpos)+')','Filter/Mode Selection')
   tbhdu1.header['CBD60001']=('ORDER('+str(spectralorder)+')','spectral order')
   if (anker is not None) and (dxy_anker is not None):
      tbhdu1.header['CBD70001']=('ANCHOR(%6.1f,%6.1f)'%(anker[0],anker[1]),'anchor in pix (1100.5,1100.5)pix=(0,0)mm')
      # bug fix: the original format "%f4.0" printed the full float followed
      # by a literal "4.0"; "%4.0f" is the intended fixed-width format
      tbhdu1.header['CBD80001']=('ANCHOR_RANGE(%4.0f,%4.0f)'%(dxy_anker[0],dxy_anker[1]),'calibrated range from anchor')
      tbhdu1.header['CBD90001']=('COIAWARE('+'T'+')','pile-up effect taken out')
   tbhdu1.header['COIVERS']=('2','full solution')
   tbhdu1.header['TTYPE1']=('ENERG_LO','[keV] Lower boundary of energy bin')
   tbhdu1.header['TTYPE2']=('ENERG_HI','[keV] Upper boundary of energy bin')
   tbhdu1.header['TTYPE5']=('SPECRESP','[cm**2] Effective Area')
   tbhdu1.header['COMMENT']= 'The effective area was determined using version 2 of the '
   tbhdu1.header['COMMENT']= 'coincidence loss'
   tbhdu1.header['COMMENT']=('uvotpy.writeEffAreaFile() version='+version)
   tbhdu1.header['COMMENT']=('created '+datestring)
   tbhdu1.header['CAL_REF']=('2015MNRAS.449.2514K','CDS Bibcode grism calibration')
   if specresp_err is not None:
      tbhdu1.header['TTYPE6']=('SPRS_ERR','[cm**2] 1-sigma error effective area')
   hdulist.append(tbhdu1)
   # NOTE: 'clobber' was renamed 'overwrite' in modern astropy; kept here
   # for compatibility with the pyfits fallback import above
   hdulist.writeto(outfile,clobber=clobber)
def XYSpecResp(wheelpos=None, spectralorder=1, anker=[1129, 1022], test=None, chatter=0):
    '''The spectral response based on the position of the anchor of the spectrum.

    Depends on the grism mode via 'wheelpos' and the spectral order.

    Parameters
    ----------
    wheelpos : int
        filterwheel position, one of {160, 200, 955, 1000}
    kwargs : dict
       - spectralorder : int
         order of the spectrum
       - anker : list
         position in detector coordinates (pixels)
       - test : any
         if not None then get the response at the boresight

    Returns
    -------
    An interpolating function for the spectral response
    based on the position (Xank,Yank) of the anchor of the spectrum.
    Depends on the grism mode via 'wheelpos' and the spectral order.

    Notes
    -----
    Will be superseded by `readFluxCalFile`
    '''
    import os
    if spectralorder == 1:
        print('DEPRECATION NOTICE 2013-04-25: This method will be superseded by readFluxCalFile')
        print(' - available calibration files are those for the default position ony ')
    Xank = anker[0]
    Yank = anker[1]
    # first get a list of the available calibration files, then
    # select the best one based on the nearest position.
    if test is not None:
        from scipy import interpolate
        # get boresight anchor for the current grism mode
        if wheelpos == 160:
            bsx, bsy = uvotgetspec.boresight("uc160")
        elif wheelpos == 200:
            bsx, bsy = uvotgetspec.boresight("ug200")
        elif wheelpos == 955:  # BUG FIX: was 'wheepos' (undefined name -> NameError)
            bsx, bsy = uvotgetspec.boresight("vc955")
        elif wheelpos == 1000:
            bsx, bsy = uvotgetspec.boresight("vg1000")
        # grab the spectral response for the center
        sr_bs_func = XYSpecResp(wheelpos=wheelpos, spectralorder=spectralorder,
                                anker=[bsx, bsy], chatter=chatter)
        # get the normalised flux at the anchor position
        Z = _specresp(wheelpos, spectralorder, arf1=None, arf2=None, chatter=0)
        wmean, xresp = Z[:2]
        fnorm = None
        # NOTE(review): this 'test' branch falls through without returning or using
        # sr_bs_func/fnorm — behaviour kept identical to the original (incomplete?) code.
    if spectralorder == 2:
        print("WARNING XYSpecResp: anchor dependence second order response has not yet been implemented")
        return SpecResp(wheelpos, spectralorder,)
    # locate the calfiles directory; fail loudly when UVOTPY is not defined
    # (the original concatenated None + str, raising an obscure TypeError)
    uvotpy_env = os.getenv('UVOTPY')
    if (uvotpy_env is None) or (uvotpy_env == ''):
        print("please define environment variable UVOTPY before proceeding")
        raise RuntimeError("environment variable UVOTPY is not defined")
    calfiles = uvotpy_env + '/uvotpy' + "/calfiles/"
    status = os.system("ls -1 " + calfiles + " > tmp.1")
    if status != 0:
        print("FAIL: ls -1 " + calfiles + " > tmp.1")
    with open("tmp.1") as f:
        clist = f.readlines()
    status = os.system("rm tmp.1")
    if len(clist) == 0:
        print("WARNING XYSpecResp: calfiles directory seems empty")
    # default (position-independent) effective area files per grism mode;
    # anchor-dependent file selection was disabled upstream (dead code removed).
    # NOTE: for an unsupported wheelpos, arf1 stays undefined (as in the original).
    if wheelpos == 160:
        arf1 = 'swugu0160_1_20041120v999.arf'  # 'swugu0160_ax1130ay1030_dx70dy70_o1_20041120v001.arf'
        arf2 = None
    if wheelpos == 200:
        arf1 = 'swugu0200_1_20041120v999.arf'  # 'swugu0200_ax1000ay1080_dx70dy70_o1_20041120v001.arf'
        arf2 = None
    if wheelpos == 955:
        arf1 = 'swugv0955_1_20041120v999.arf'
        arf2 = None
    if wheelpos == 1000:
        arf1 = 'swugv1000_1_20041120v999.arf'
        arf2 = None
    return SpecResp(wheelpos, spectralorder, arf1=arf1, arf2=arf2)
def _specresp (wheelpos, spectralorder, arf1 = None, arf2 = None, chatter=1):
    '''Read the spectral response file [or a placeholder].

    Parameters
    ----------
    wheelpos : int, {160,200,955,1000}
    spectralorder : int, {1,2}
    kwargs : dict
       -------------
       optional input
       - **arf1** : str, optional
         directs read of filename from $UVOTPY/calfiles/
         if None, program reads from $UVOTPY/calfiles
         if 'CALDB' read from $CALDB
       - **arf2** : str, optional
         regardless of input will read from $UVOTPY/calfiles/

    Returns
    -------
    (wmean, xresp) : wavelength midpoints (A) and effective area (cm**2),
    ordered by increasing wavelength.

    Notes
    -----
    Will be superseded by `readFluxCalFile`
    '''
    import os
    try:
        from astropy.io import fits
    except:
        import pyfits as fits
    import numpy as np
    from scipy import interpolate

    caldb = os.getenv("CALDB")
    # do not crash here when UVOTPY is unset; only raise if the path is needed
    uvotpy_env = os.getenv("UVOTPY")
    uvotpy = None if uvotpy_env is None else uvotpy_env + '/uvotpy'

    if spectralorder == 1:
        if wheelpos == 200:
            if arf1 is None:
                # BUG FIX: filename was 'swugu200_1_...' (missing leading zero),
                # inconsistent with every other calibration file name
                arfdbfile = 'swugu0200_1_20041120v999.arf'
            elif arf1 == 'CALDB':
                arfdbfile = 'swugu0200_20041120v101.arf'
            else:
                arfdbfile = arf1
            EXTNAME = 'SPECRESPUGRISM200'
        elif wheelpos == 160:
            if arf1 is None:
                arfdbfile = 'swugu0160_1_20041120v999.arf'
            elif arf1 == 'CALDB':
                arfdbfile = 'swugu0160_20041120v101.arf'
            else:
                arfdbfile = arf1
            EXTNAME = 'SPECRESPUGRISM160'
        elif wheelpos == 1000:
            if arf1 is None:
                arfdbfile = 'swugv1000_1_20041120v999.arf'
            elif arf1 == 'CALDB':
                arfdbfile = 'swugv1000_20041120v101.arf'
            else:
                arfdbfile = arf1
            EXTNAME = 'SPECRESPVGRISM1000'
        elif wheelpos == 955:
            if arf1 is None:
                arfdbfile = 'swugv0955_1_20041120v999.arf'
            elif arf1 == 'CALDB':
                arfdbfile = 'swugv0955_20041120v101.arf'
            else:
                arfdbfile = arf1
            EXTNAME = 'SPECRESPVGRISM955'
        else:
            print("FATAL: exposure header does not have filterwheel position encoded")
            return
    if spectralorder == 2:
        if wheelpos == 160:
            EXTNAME = 'SPECRESP0160GRISM2NDORDER'
            arfdbfile = 'swugu0160_2_20041120v999.arf'
        elif wheelpos == 200:
            EXTNAME = 'SPECRESP0200GRISM2NDORDER'
            arfdbfile = 'swugu0200_2_20041120v999.arf'
        elif wheelpos == 955:
            EXTNAME = 'SPECRESP0955GRISM2NDORDER'
            arfdbfile = 'swugv0955_2_20041120v999.arf'
        elif wheelpos == 1000:
            EXTNAME = 'SPECRESP1000GRISM2NDORDER'
            arfdbfile = 'swugv1000_2_20041120v999.arf'
        else:
            print("FATAL: exposure header does not have filterwheel position encoded", wheelpos)
            return
        if arf2 is not None:
            arfdbfile = arf2
    #
    # get spectral response [cm**2] from the ARF file
    #
    if (spectralorder == 2) | ((spectralorder == 1) & (arf1 != 'CALDB')):
        if uvotpy is None:
            raise RuntimeError("environment variable UVOTPY is not defined")
        if chatter > 0:
            print("opening spectral response ARF file: " + uvotpy + '/calfiles/' + arfdbfile)
        f = fits.open(uvotpy + '/calfiles/' + arfdbfile)
    else:
        # read the first-order CALDB file
        dirstub = '/data/swift/uvota/cpf/arf/'
        if caldb is None:
            raise RuntimeError("environment variable CALDB is not defined")
        if chatter > 0:
            print("opening spectral response ARF file: " + caldb + dirstub + arfdbfile)
        f = fits.open(caldb + dirstub + arfdbfile)
        if chatter > 0:
            print('********************************************************************')
            print('*** WARNING: EA ~10% low when no coi-correction is included ********')
            print('*** WARNING: This means your flux might be too high by ~10% ********')
            print('********************************************************************')
    if chatter > 0:
        print("Read in " + str(spectralorder) + " order ARF file")
        print(f.info())
    tab = f[1].data
    wmin = tab.field('wave_min')
    wmax = tab.field('wave_max')
    xresp = tab.field('specresp')   # response in cm2 per pixel
    wmean = 0.5 * (wmin + wmax)
    # keep only finite entries
    q = np.isfinite(wmean) & np.isfinite(xresp)
    wmean = wmean[q]
    xresp = xresp[q]
    # downstream interpolation requires increasing wavelength
    if wmean[0] > wmean[-1]:
        wmean = wmean[::-1].copy()
        xresp = xresp[::-1].copy()
    f.close()
    return wmean, xresp
def SpecResp (wheelpos, spectralorder, arf1 = None, arf2 = None):
    """
    Returns spectral response function

    Parameters
    ----------
    wheelpos : int, {160,200,955,1000}
    spectralorder : int, {1,2}
    kwargs : dict
       -------------
       optional input
       - **arf1** : str, optional
         directs read of filename from `$UVOTPY/calfiles/`
         if None, program reads from `$UVOTPY/calfiles`
         if `'CALDB'` read from `$CALDB`
       - **arf2** : str, optional
         regardless of input will read from `$UVOTPY/calfiles/`

    Returns
    -------
    An interpolating function for the spectral response
    as a function of wavelength (A)

    Notes
    -----
    Use `readFluxCalFile()` in case the new calibration file is present
    """
    from scipy import interpolate
    # BUG FIX: the arf1/arf2 arguments were previously hard-coded to None in this
    # call, so a caller-selected effective area file was silently ignored.
    Z = _specresp(wheelpos, spectralorder, arf1=arf1, arf2=arf2)
    wmean, xresp = Z[:2]
    # linear interpolation; outside the calibrated range returns NaN rather than raising
    specrespfunc = interpolate.interp1d(wmean, xresp, kind='linear', bounds_error=False)
    return specrespfunc
def readFluxCalFile(wheelpos,anchor=None,option="default",spectralorder=1,
    arf=None,msg="",chatter=0):
    """Read the new flux calibration file, or return None.

    Parameters
    ----------
    wheelpos : int, required
       the position of the filterwheel
    kwargs: dict
     - **anchor** : list, optional
       coordinate of the anchor
     - **option** : str
       option for output selection:
       option=="default" + anchor==None: old flux calibration
       option=="default" + anchor : nearest flux calibration + model extrapolation
       option=="nearest" : return nearest flux calibration
       option=="model" : model
     - **spectralorder** : int
       spectral order (1, or 2)
     - **arf**: path
       fully qualified path to a selected response file
     - **msg**: str
       buffer message list (to add to)

    Returns
    -------
    None if not (yet) supported
    option == 'model' returns the (astropy/pyfits) fits HDU (header+data) of the model
    option == 'nearest'
       returns the fits HDU of the nearest calfile
    option == 'default' and anchor == None:
       returns the fits HDU of the nearest calfile
    option == 'default' and anchor position given (in detector coordinates)
       returns the fits HDU and an
       interpolating function fnorm(wave in A) for the flux correction
    msg : string comments separated by \\n

    Notes
    -----
    2013-05-05 NPMKuin
    """
    try:
        from astropy.io import fits
    except:
        import pyfits as fits
    import os
    import sys
    import numpy as np
    from scipy import interpolate

    typeNone = type(None)
    grismname = "UGRISM"
    if wheelpos > 500:
        grismname = "VGRISM"
    if type(anchor) != typeNone:
        # NOTE(review): as in the original, a string anchor is only converted when
        # its *character* length is 2, which looks suspect — behaviour preserved.
        if len(anchor) != 2:
            sys.stderr.write("input parameter named anchor is not of length 2")
        elif type(anchor) == str:
            anchor = np.array(anchor, dtype=float)
    check_extension = False
    # here the "latest" version of the calibration files has been hardcoded
    # latest update:
    if spectralorder == 1:
        if wheelpos == 200:
            calfile = 'swugu0200_20041120v105.arf'
            extname = "SPECRESPUGRISM200"
            model = "ZEMAXMODEL_200"
        elif wheelpos == 160:
            calfile = 'swugu0160_20041120v105.arf'
            extname = "SPECRESPUGRISM160"
            model = "ZEMAXMODEL_160"
        elif wheelpos == 955:
            calfile = 'swugv0955_20041120v104.arf'
            extname = "SPECRESPVGRISM0955"
            model = "ZEMAXMODEL_955"
        elif wheelpos == 1000:
            calfile = 'swugv1000_20041120v105.arf'
            extname = "SPECRESPVGRISM1000"
            model = "ZEMAXMODEL_1000"
        else:
            raise RuntimeError( "FATAL: [uvotio.readFluxCalFile] invalid filterwheel position encoded" )
    elif spectralorder == 2:
        # HACK: force second order to 'nearest' option 2015-06-30
        # BUG FIX: this line used '==' (a no-op comparison) where '=' was intended
        option = "nearest"
        check_extension = True
        if wheelpos == 200:
            calfile = 'swugu0200_2_20041120v999.arf' #'swugu0200_20041120v105.arf'
            extname = "SPECRESP0160GRISM2NDORDER"
            model = ""
        elif wheelpos == 160:
            calfile = 'swugu0160_2_20041120v999.arf' #'swugu0160_20041120v105.arf'
            extname = "SPECRESP0160GRISM2NDORDER"
            model = ""
        elif wheelpos == 955:
            calfile = 'swugv0955_2_20041120v999.arf' #'swugv0955_20041120v104.arf'
            extname = "SPECRESPVGRISM955"
            model = ""
        elif wheelpos == 1000:
            calfile = 'swugv1000_2_20041120v999.arf' #swugv1000_20041120v105.arf'
            extname = "SPECRESPVGRISM1000"
            model = ""
        else:
            raise RuntimeError( "FATAL: [uvotio.readFluxCalFile] invalid filterwheel position encoded" )
        if chatter > 3:
            print("[uvotio.readFluxCalFile] "+calfile)
            print("   extname="+extname)
            print("   model="+model+"|")
    else:
        raise RuntimeError("spectral order not 1 or 2 - no effective area available")
    if chatter > 1:
        print("uvotio.readFluxCalFile attempt to read effective area file: ")
    if arf is not None:
        if arf.upper() == "CALDB":
            # try to get the file from the CALDB
            # BUG FIX: the getenv result was discarded and an undefined name
            # 'CALDB' used below; capture it here.
            caldb = os.getenv("CALDB")
            command="quzcif swift uvota - "+grismname+\
                " SPECRESP now now wheelpos.eq."+str(wheelpos)+" > quzcif.out"
            os.system(command)
            f = open("quzcif.out")
            records = f.readlines()
            f.close()
            os.system("rm -f quzcif.out")
            arf, extens = records[0].split()
            arf = caldb + "/data/swift/uvota/cpf/arf/"+arf
            hdu = fits.open(arf)
        else:
            # path to arf is supplied
            # the format must give the full path (starting with "/" plus FITS extension
            # if no extension was supplied and there is only one, assume that's it.
            # check version==2, using presence of CBD70001 keyword and see if spectral order is right
            if chatter > 3: print(arf)
            try: # get extension from path
                if len(arf.split("+") ) == 2:
                    file, extens = arf.split("+")
                elif len(arf.split("[") ) == 2:
                    file = arf.split("[")[0]
                    extens = arf.split("[")[1].split("]")[0]
                else:
                    # BUG FIX: 'file' was unbound on this path (NameError masked
                    # by the broad except); keep the path as-is.
                    check_extension = True
                    file = arf
                arf = file
            except:
                raise IOError("The supplied effective area file name "+arf+" cannot be understood.")
            hdu = fits.open(arf)
            if check_extension: # old version file, no extension supplied
                # BUG FIX: the header value is a string; compare as int
                if int(hdu[1].header['CBD60001'].split("(")[1].split(")")[0]) != spectralorder:
                    raise IOError("The supplied effective area file is not correct spectral order.")
                extens = 1
            elif isinstance(extens, str) and extens.isdigit():
                extens = int(extens)  # numeric FITS extension index
            if ("CBD70001" not in hdu[extens].header) : # old version
                print("Spectral order = %i. \t"%(spectralorder))
                print("Using the oldest version of the effective area. \n"+\
                      "Flux, COI correction will be wrong.")
                return hdu[extname],msg
    else: # argument arf = None : use the packaged calibration file
        uvotpy_env = os.getenv("UVOTPY")
        if uvotpy_env is None:
            raise RuntimeError("UVOTPY environment variable not set")
        uvotpy = uvotpy_env + '/uvotpy'
        arf = os.path.join(uvotpy,"calfiles",calfile)
        hdu = fits.open(arf)
        if check_extension: # old version file (second order)
            # BUG FIX: compare the header string to the order as int
            if int(hdu[1].header['CBD60001'].split("(")[1].split(")")[0]) != spectralorder:
                print("Spectral oder = %i:\t"%(spectralorder))
                print("The supplied effective area file %s is not \n for the correct spectral order."%(arf))
        if ("CBD70001" not in hdu[extname].header) : # old version
            print("Using the oldest version of the effective area. \n"+\
                  "Flux, COI correction will be wrong.")
            return hdu[extname],msg

    if chatter > 0: print("Spectral order = %i: using flux calibration file: %s"%(spectralorder,arf))
    if chatter > 2: hdu.info()
    msg += "Flux calibration file: %s\n"%(arf.split('/')[-1])

    if (option == "default") | (option == "nearest"):
        modelhdu = None
        if type(anchor) == typeNone: # assume centre of detector
            # no anchor: return the nearest calibration without model extrapolation
            # (the original fell through to an undefined 'modelhdu' -> NameError)
            anchor = [1000,1000]
        elif (option == "default"):
            modelhdu = hdu[model]
        if wheelpos < 500:
            n2 = 16
        else:
            n2 = 12
        names = []       # names extensions
        calanchors = []  # anchor positions for each calibration extension
        dist = []        # squared distances to the requested anchor
        for h in range(1,len(hdu)):
            N = hdu[h].header["EXTNAME"].upper()
            NL = N.split("_")
            if (len(NL) == 3):
                if( int(NL[2][1]) == spectralorder):
                    names.append(N)
                    root, ankxy, ord = NL
                    ankcal = ankxy.split("AY")
                    ankcal[0] = float(ankcal[0].split("AX")[1])
                    ankcal[1] = float(ankcal[1])
                    calanchors.append([ankcal[0],ankcal[1]])
                    dist.append( (ankcal[0]-anchor[0])**2+(ankcal[1]-anchor[1])**2 )
        # now we have a list of valid extnames, and anchor positions
        # for the calibration file fits-extensions
        dist = np.array(dist)
        k = np.where( dist == np.min(dist) )[0][0]
        cal = hdu[names[k]]
        print("Nearest effective area is %s  - selected"%(names[k]))
        msg += "Selected nearest effective area FITS extension %s\n"%(names[k])
        if (option == "nearest") or (modelhdu is None):
            return cal, msg
        try:
            if chatter > 4:
                print("ReadFluxCalFile:      calanchor ", calanchors[k])
                print("ReadFluxCalFile:         anchor ", anchor)
                print("ReadFluxCalFile: MODEL  extname ", modelhdu.header['extname'])
            modelcalflux = getZmxFlux (calanchors[k][0],calanchors[k][1],modelhdu)
            modelobsflux = getZmxFlux (anchor[0],anchor[1],modelhdu)
            q = np.isfinite(modelcalflux) & np.isfinite(modelobsflux)
            w = 10.0*modelhdu.data['WAVE']
            if chatter > 4:
                print("ReadFluxCalFile:  check: ")
                print("ReadFluxCalFile:  w.shape ",w.shape)
                print("ReadFluxCalFile:  =784*",n2," ?")
            w = w.reshape(n2,784)[q,0]
            fn = modelobsflux[q]/modelcalflux[q]
            w1 = 1650.0
            f1 = 1.0  # was f1 = (fn[1]-fn[0])/(w[1]-w[0])*(w1-w[0]) + fn[0]
            # pad the correction with flat extrapolation points at both ends
            n = len(w)+2
            x = np.zeros(n,dtype=float)
            y = np.zeros(n,dtype=float)
            x[0] = w1
            y[0] = f1
            x[1:-1] = w
            y[1:-1] = fn
            x[-1] = 7000.
            y[-1] = y[-2]
            y[ y < 0 ] = 0.0
            fnorm = interpolate.interp1d(x,y,bounds_error=False, fill_value=0.)
            msg += "Flux corrected for variation over detector using model\n"
            return cal, fnorm, msg
        except RuntimeError:
            pass
        print("WARNING: Failure to use the model for inter/extrapolation of the calibrated locations.")
        print("         Using Nearest Eaafective Area File for the Flux calibration.")
        fnorm = interpolate.interp1d([1600,7000],[1.,1.],)
        return cal, fnorm, msg
    elif option == "model":
        return hdu[model]
    else:
        raise RuntimeError( "invalid option passed to readFluxCalFile")
def getZmxFlux(x,y,model,ip=1):
    '''Interpolate model to get normalized flux.

    parameters
    ----------
    x, y : float
       anchor coordinate x,y to find an interpolated solution to the model
    model : fits structure
       binary table extension (header + data)
       fields are wave, xpix, ypix, flux
    ip : int
       The order of the interpolation (1=linear, 2=quadratic, 3=cubic)

    returns
    -------
    flux interpolated at (x,y) in (xpix, ypix) as function of wave
    '''
    import numpy as np
    from scipy import interpolate

    # BUG FIX: the original compared type objects to *strings*
    # (type(model) != 'astropy.io.fits...BinTableHDU'), which is always true,
    # so the validation never triggered. Duck-type instead: we only need the
    # 'header' and 'data' attributes of a bintable HDU.
    if not (hasattr(model, "header") and hasattr(model, "data")):
        raise IOError("getZmxFlux model parameter is not a proper FITS HDU bintable type")
    n3 = 28*28                                 # 28x28 grid of positions per wavelength
    n2 = int(model.header['NAXIS2']/n3)        # number of wavelength planes
    if not ((n2 == 12) | (n2 == 16)):
        raise IOError("getZmxFlux: size of array in MODEL not correct; perhaps file corrupt?")
    zmxwav = model.data['WAVE']
    xp     = model.data['XPIX']
    yp     = model.data['YPIX']
    zmxflux = model.data['FLUX']
    zmxwav = zmxwav.reshape(n2,n3)[:,0]
    xp = xp.reshape(n2,n3)
    yp = yp.reshape(n2,n3)
    zmxflux = zmxflux.reshape(n2,n3)
    flux = np.zeros(n2,dtype=float)
    # detector bounds used to reject off-detector model points
    dminx = 0
    dminy = -100
    dmax  = 2100
    for j2 in range(n2):     # loop over wavelengths
        # filter out outliers
        q = (xp[j2,:] > dminx) & (xp[j2,:] < dmax) & (yp[j2,:] > dminy) & (yp[j2,:] < dmax)
        if len(np.where(q)[0]) < 17:
            print("getZmxFlux warning: at wavelength=",zmxwav[j2]," not sufficient valid points found")
        fx = xp[j2,q]
        fy = yp[j2,q]
        ff = zmxflux[j2,q]
        try:
            tck = interpolate.bisplrep(fx,fy,ff,xb=dminx,xe=dmax,yb=dminy,ye=dmax,kx=ip,ky=ip,)
            flux[j2] = interpolate.bisplev(x, y, tck)
        except Exception:
            raise RuntimeError ("getZmxFlux ERROR in interpolation")
    return flux
def uvotify (spectrum, fileout=None, disp=None, wheelpos=160, lsffile=None, clean=True, chatter=1, clobber=False):
    '''
    Fold a high resolution input spectrum through the uvot spectral
    response to "uvotify" the spectrum.

    Parameters
    ----------
    spectrum : path, str
       file name ASCII file, two columns wave(A), flux
    kwargs : dict
       -------------
       - **fileout** : path, str, optional
         if given, the new spectrum will be written to an output file (ascii)
       - **wheelpos** : int
         the filterwheel position for selecting typical dispersion
       - **disp** : ndarray, optional
         dispersion coefficients (will be approximated if not given)
       - **lsffile** : path, str, optional
         the file with LSF data. If not given the file in $UVOTPY/calfiles
         will be used.
       - **clean** : bool
         if True remove invalid data
       - **chatter** : int
         verbosity
       - **clobber** : bool
         if True overwrite exisiting file

    Returns
    -------
    (status, wave, flux) :
       - status = 0 success
       - wavelength and flux arrays convolved with the
         uvot spectral response.

    - 2012-02-09 NPMK start development
    '''
    try:
        from astropy.io import fits
    except:
        import pyfits as fits
    import numpy as np
    from uvotmisc import rdTab
    import os
    from scipy.ndimage import convolve
    import uvotio
    import uvotgetspec
    from scipy import interpolate
    import datetime

    version = '120209'
    status = 0
    now = datetime.date.today()
    datestring = now.isoformat()[0:4]+now.isoformat()[5:7]+now.isoformat()[8:10]

    if disp is None:  # BUG FIX: '== None' is ambiguous once disp is an ndarray
        # typical dispersion first order (use for whole detector):
        if wheelpos == 160:
            disp = np.array([4.1973e-10,-1.3010e-6,1.4366e-3,3.2537,2607.6])
        elif wheelpos == 200:
            disp = np.array([4.1973e-10,-1.3010e-6,1.4366e-3,3.2537,2607.6]) # placeholder
        elif wheelpos == 955:
            disp = np.array([4.1973e-10,-1.3010e-6,1.4366e-3,3.2537,2607.6]) # placeholder
        elif wheelpos == 1000:
            disp = np.array([4.1973e-10,-1.3010e-6,1.4366e-3,3.2537,2607.6]) # placeholder
        else:
            print("error in wheelpos argument")
            status = 1
    # determine the file type using the unix 'file' utility
    tempfile = 'ab9804573234isfkjldsf.tmp'
    status = os.system('file '+spectrum+' > '+tempfile )
    if status == 0:
        f = open(tempfile)
        line = f.readline()
        f.close()
        os.system('rm '+tempfile)
        filetype = (line.split(':')[1]).split()[0]
        if chatter > 0: print("spectrum file type = "+filetype)
        if filetype == 'ASCII':
            try:
                spect = rdTab(spectrum)
                wave = spect[:,0]
                flux = spect[:,1]
                q = np.isfinite(flux) & np.isfinite(wave)
                wav1 = wave[q]
                flx1 = flux[q]
                if len(wav1) < 1:
                    status = 1
                    print("number of valid wavelengths is zero")
            except:
                status = 1
                print("failure reading the spectrum with routine rdTab. Make sure that the format is right.")
        elif filetype == 'FITS':
            status = 1
            print("filetype not supported")
            # future support for fits single column spectral format
            # and wave/flux columns in table SPECTRUM
        else:
            status = 1
            print("filetype not supported")
    else:
        print("error reading file type ")

    instrument_fwhm = 2.7/0.54 # in pix units
    gg = uvotgetspec.singlegaussian(np.arange(-12,12),1.0,0.,instrument_fwhm)
    gg = gg/gg.sum().flatten()  # normalised gaussian

    if status == 0:
        NN = len(wav1)   # number good points in the spectrum
        if NN < 3:
            print("uvotify: not enough valid data points. ")
            return
        if lsffile is None:
            # guard against an unset UVOTPY (the original concatenated None + str
            # before the dead "== ''" check could ever fire)
            uvotpy_env = os.getenv('UVOTPY')
            if (uvotpy_env is None) or (uvotpy_env == ''):
                print('The UVOTPY environment variable has not been set')
            UVOTPY = (uvotpy_env or '') + '/uvotpy'
            lsffile = UVOTPY+'/calfiles/zemaxlsf.fit'
        lsffile = fits.open( lsffile )
        tlsf = lsffile[1].data
        lsfchan = tlsf.field('channel')[0:15]   # energy value
        lsfwav = uvotio.kev2angstrom(lsfchan)   # wavelength
        epix    = tlsf.field('epix')[0,:]       # offset in half pixels
        lsfdata = tlsf.field('lsf')[:15,:]      # every half pixel a value
        lsffile.close()
        # define the LSF(wave) and convolve flux(wave) * LSF(wave)
        flux1 = np.zeros(NN)
        for k in range(NN):
            # find index e in lsfchan and interpolate lsf
            # BUG FIX: was 'wave[k]' which indexes the *unfiltered* array while
            # flx1/wav1 are the filtered ones; mismatch when NaNs were removed.
            w = wav1[k]
            j = lsfwav.searchsorted(w)
            if j == 0:
                lsf = lsfdata[0,:].flatten()
            elif ((j > 0) & (j < 15) ):
                e1 = lsfwav[j-1]
                e2 = lsfwav[j]
                frac = (w-e1)/(e2-e1)
                lsf1 = lsfdata[j-1,:]
                lsf2 = lsfdata[j,:]
                lsf = ((1-frac) * lsf1 + frac * lsf2).flatten()
            else:
                # j = 15
                lsf = lsfdata[14,:].flatten()
            # convolution lsf with instrument_fwhm
            lsf_con = convolve(lsf,gg.copy(),)
            # assign wave to lsf_func relative to w at index k
            # rescale lsfcon from half-pixels to channels
            d = np.arange(-79,79)*0.5 + (uvotgetspec.pix_from_wave(disp, w))[0]
            wave1 = np.polyval(disp,d)
            # wave1 is an increasing function - if not, the interpolating function fails.
            lsf_func = interpolate.interp1d(wave1, lsf_con,bounds_error=False,fill_value=0.0)
            loli = max(k-39, 0)
            upli = min(k+39, NN)
            norm = np.asarray( lsf_func(wav1[loli:upli]) ).sum()
            flux1[k] = (flx1[loli:upli] * lsf_func(wav1[loli:upli]) / norm ).sum()
        if clean:
            flux = flux1
            wave = wav1
        else:
            # BUG FIX: original used undefined 'where' and invalid 'not q' on an
            # array; scatter results back and flag invalid points with NaN.
            flux[q] = flux1
            flux[~q] = np.nan
        if fileout is None:
            return status, wave, flux
        else:
            f = open(fileout,'w')
            q = np.isfinite(wave) & np.isfinite(flux)
            wave = wave[q]
            flux = flux[q]
            for i in range(len(wave)):
                f.write("%8.2f  %13.5e\n" % (wave[i],flux[i]) )
            f.close()
    else:
        return status
def writeSpectrum(ra,dec,filestub,ext, Y, fileoutstub=None,
arf1=None, arf2=None, fit_second=True, write_rmffile=True,
used_lenticular=True, fileversion=1, calibration_mode=True,
history=None, chatter=1, clobber=False ) :
'''Write a standard UVOT output file - Curved extraction only, not optimal extraction.
Parameters
----------
ra,dec : float, float
position in decimal degrees
filestub : str
"sw" + obsid
ext : int
extension number
Y : tuple
compound variable with spectral data from uvotgetspec
kwargs : dict
-------------
- **fileoutstub** : str, optional
stub for the output file name
- **arf1** : str, optional
if 'CALDB', use the caldb effective area file
- **arf2** : str, optional
if 'CALDB' use the caldb effective area file
- **fit_second** : bool
if `True` tried extracting the second order
- **write_rmffile** : bool
write RMF output if True (slow)
- **history** : list
list of processing history messages to write to header
- **chatter** : int
- **clobber** : bool
overwrite files
Returns
-------
Writes the output file only.
Notes
-----
**Output file composition**
For details, see the output file format description.
Main header:
-----------
wheelpos, filter, orders, author
First extension
---------------
For fileversion=1:
The first extension is named SPECTRUM (future: 'FIRST_ORDER_PHA_SPECTRUM') and
contains the standard input for XSPEC,
- channel number,
- measured uncorrected counts per channel, total counts not background corrected
Includes processing HISTORY records from `getSpec()`
Errors are assumed to be poisonian
Modifications for fileversion=2:
The output now corrects for coincidence loss, and is in the form of
- photon rate per second per channel with all known corrections (aperture,
sensitivity, coincidence-loss)
- errors, including aperture correction, coincidence-loss
correction (non-poissonian).
Second extension
----------------
The second extension is named CALSPEC (future: 'FIRST_ORDER_NET_SPECTRUM')
contains the standard input for IDL/PYTHON with
For fileversion=1:
First Order:
- pixelno(relative to anchor),
- wave(\AA),
- net_count_rate(s-1 channel-1), corrected for PSF, integrated normal
- left_background_rate(s-1 channel-1),
- right_background_rate (s-1 channel-1),
- flux(erg cm-2 s-1 A-1), corrected for aperture, coincidence, sensitivity
- flux_err(erg cm-2 s-1 A-1),
- quality flag,
- aper1corr
Second order:
- wave2,
- net2rate,(s-1 channel-1)
- bg2rate, (s-1 channel-1)
- flux2, not calibrated, approximate (erg cm-2 s-1 A-1)
- flux2err,
- qual2,
- aper2corr
- coincidence-loss factor as applied to the flux listed.
Modifications for fileversion=2:
The first extension now containts the corrected
count rates, so the uncorrected count rates are now put in the second extension.
The flux and flux error still have all corrections applied.
- net count rate per second per channel in extraction track, uncorrected
- bg1rate is now the background rate per second per channel in extraction track,
uncorrected, new column
- bg1rate and bg2rate (count/s/pix) are the mean rates measured
in the two backgrounds and need to be multiplied by the spectra track
width in pixels for comparison to the background rate.
Further modifications happeb when the calibration_mode is set.
Third extension
---------------
The third extension named 'SPECTRUM_IMAGE' contains the image of the total spectrum
A fourth extension may exist for spectra from summed images
and contains the **exposure map**
**history**
- 2011-12-10 NPM Kuin
- 2012-03-05 fixed error in calculation bg1rate(a.k.a. bg_r) and bg2rate (or bg_l)
- 2012-09-14 added coi correction
- 2013-03-06 edited header
- 2015-02-13 change quality flag in SPECTRUM extension to conform to XSPEC range
'''
try:
from astropy.io import fits
except:
import pyfits as fits
import datetime
import numpy as np
from scipy import interpolate
import os
from pylab import polyval
import uvotgetspec
now = datetime.date.today()
rnu = now.day*1.2+now.month*0.99+now.year*0.3
version = '120914'
h_planck = 6.626e-27 # erg/s
lightspeed = 2.9979e10 # cm/sec
h_c_ang = h_planck * lightspeed * 1e8 # ergs-Angstrom
ch = chatter
trackwidth = uvotgetspec.trackwidth # half-width of the extraction in terms of sigma
wave2 = None
sp2netcnt = None
bg2netcnt = None
wave3 = None
quality = None
qflags = uvotgetspec.quality_flags()
################# prepare data for writing to output file
############## main curved slit extraction #####################
#
# unpack data
if type(Y) == dict:
Yfit = Y["Yfit"]
hdr = Y['hdr']
wheelpos = Y['wheelpos']
offset = Y['offset']
co_first = Yfit['co_first']
sp_first = Yfit['sp_first']
bg_first = Yfit['bg_first']
co_second = Yfit['co_second']
sp_second = Yfit['sp_second']
bg_second = Yfit['bg_second']
co_back = Yfit['co_back']
apercorr = Yfit['apercorr']
qquality = Yfit['quality']
expospec = Yfit['expospec']
C_1 = Y['C_1']
C_2 = Y['C_2']
x = Yfit['x']
q1 = Yfit['q1']
q2 = Yfit['q2']
sig1coef = Yfit['sig1coef']
sig2coef = Yfit['sig2coef']
present2 = Yfit["present2"]
dist12 = Y['dist12']
anker = Y['anker']
coi_level = Y['coi_level']
background_strip1 = Y["background_1"]
background_strip2 = Y["background_2"]
phx = Y['Xphi']
phy = Y['Yphi']
anker = Y['anker']
offset = Y['offset']
ank_c = Y['ank_c']
exposure = hdr['exposure']
extimg = Y['extimg']
expmap = Y['expmap']
zeroxy = Y['zeroxy_imgpos']
# if present background_template extimg is in Y['template']
effarea1 = Y['effarea1']
effarea2 = Y['effarea2']
else:
# this will be removed soon in favor of the dictionary passing method
(Y0,Y1,Y2,Y4) = Y
(filestuff), (method),(phistuff), (dist12, ankerimg, ZOpos), expmap, bgimg, bg_limits_used, bgextra = Y0
if filestuff != None:
(specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile) = filestuff
(Xphi, Yphi, date1) = phistuff
else:
Xphi,Yphi = 0,0
( (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset),
(C_1,C_2,img), hdr,m1,m2,aa,wav1 ) = Y1
wheelpos = hdr['WHEELPOS']
background_strip1 = bg1
background_strip2 = bg2
((present0,present1,present2,present3),(q0,q1,q2,q3), (
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first ,co_first ),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third ,co_third ),(
x,xstart,xend,sp_all,qquality,co_back) ), (
coef0,coef1,coef2,coef3), (
bg_zeroth,bg_first,bg_second,bg_third), (
borderup,borderdown),apercorr, expospec = Y2
if Y4 != None:
wav2p, dis2p, rate2p, qual2p, dist12p = Y4[0]
phx, phy = anker_field
zeroxy = [1000,1000]
effarea1 = None
effarea2 = None
# ensure that offset is a scalar
try:
offset = offset[0]
except:
pass
# first order
# full arrays
bkg = bg_first[q1[0]] # background at spectrum integrated across width used in sp_first
counts = sp_first[q1[0]] # includes background counts
aper1corr = apercorr[1, q1[0] ] # aperture factor to multiply counts with
quality = qquality[q1[0]]
exposure=hdr['exposure']
expospec1 = expospec[1,q1[0]].flatten()
wave = polyval(C_1, x[q1[0]])
senscorr = sensitivityCorrection(hdr['tstart'],wave=wave,wheelpos=wheelpos)
background_strip1 = background_strip1[q1[0]]
background_strip2 = background_strip2[q1[0]]
spwidup1 = borderup[1,q1[0]]
spwiddown1 = borderdown[1,q1[0]]
if wheelpos < 500:
qwave = np.where( (wave > 1660.0) & (wave < 6800) )
else:
qwave = np.where( (wave > 2600.0) & (wave < 6400) )
# filtered arrays
dis = x[q1[0]][qwave]
bkg = bkg[qwave]
counts = counts[qwave]
aper1corr = aper1corr[qwave]
expospec1 = expospec1[qwave]
wave = wave[qwave]
spwidup1 = spwidup1[qwave]
spwiddown1 = spwiddown1[qwave]
if type(senscorr) == np.ndarray: senscorr = senscorr[qwave]
quality = quality[qwave]
background_strip1 = background_strip1[qwave]
background_strip2 = background_strip2[qwave]
# set up channel numbers and reverse for increasing energy
NW = len(wave)
aa = np.arange(NW)
rc = list(range(NW))
rc.reverse()
channel = aa+1
ax = list(range(NW-1))
ax1= list(range(1,NW))
dis1 = uvotgetspec.pix_from_wave(C_1,wave, spectralorder=1)
binwidth = np.polyval(C_1,dis1+0.5) - np.polyval(C_1,dis1-0.5)# width of each bin in A (= scale A/pix)
# derive corrected quantities (sensitivity, aperture, functions for coincidence)
sprate = (counts-bkg)*aper1corr/expospec1 # net count rate, PSF aperture corrected
sp1rate_err = np.sqrt(
np.abs( (bkg*2.5/trackwidth+ # corrected bakcground to 2.5 sigma trackwidth
(counts-bkg)*aper1corr)*senscorr # using PSF aperture correction on net rate
*(1+apcorr_errf(trackwidth,wheelpos))) # error in aperture correction
)/expospec1 # neglects coi-error in rate error
bg1rate = bkg/expospec1*2.5/trackwidth # background counts for 2.5 sigma width at spectrum
co_sp1rate = (co_first[q1[0]][qwave]/expospec1).flatten()
co_bgrate = (co_back [q1[0]][qwave]/expospec1).flatten()
print("writing output file: computing coincidence loss spectrum, frametime=", hdr['framtime'])
# calculate coincidence correction for coi box
fcoi, coi_valid1 = uvotgetspec.coi_func(dis,wave,
co_sp1rate,
co_bgrate,
area = 414.,
wheelpos = wheelpos,
frametime=hdr['framtime'],
background=False,
debug=False,chatter=1)
print("writing output file: computing coincidence loss background")
bgcoi = uvotgetspec.coi_func(dis,wave,
co_sp1rate,
co_bgrate,
area = 414.,
wheelpos = wheelpos,
frametime=hdr['framtime'],
background=True, \
debug=False,chatter=1)
if len(coi_valid1) == len(quality):
quality[coi_valid1 == False] = qflags["too_bright"]
else:
raise RuntimeError("the quality and coi_valid1 arrays are of different length")
sprate = sprate*senscorr
bg1rate = bg1rate*senscorr
# get effective area function
if arf1 != None:
specresp1func = SpecResp(hdr['wheelpos'], 1, arf1=arf1, arf2=arf2 )
else:
# position dependent response function [ should get more from rate2flux - too
# much duplication right now !
specresp1func = rate2flux(wave, sprate, wheelpos,
bkgrate=bg1rate,
pixno=None,
respfunc=True,
arf1=None,
arf2=None,
effarea1=effarea1,
effarea2=effarea2,
spectralorder=1,
anker=anker,
test=None,
option=1,
fudgespec=1.,
frametime=hdr['framtime'],
co_sprate = (co_first[q1[0]][qwave]/expospec1),
co_bgrate = (co_back[q1[0]][qwave]/expospec1),
debug=False,
chatter=1)
#specresp1func = XYSpecResp(wheelpos=hdr['wheelpos'],spectralorder=1, anker=anker)#Xank=anker[0], Yank=anker[1])
hnu = h_c_ang/(wave)
# coi-corrected flux
flux = hnu*sprate*fcoi(wave)/specresp1func(wave)/binwidth # [erg/s/cm2/angstrom] # coi correction
flux_err = hnu*sp1rate_err*fcoi(wave)/specresp1func(wave)/binwidth
back1rate = (background_strip1*2*trackwidth*np.polyval(sig1coef,x[q1[0]])[qwave]/expospec1) # estimate bg1 * width
back2rate = (background_strip2*2*trackwidth*np.polyval(sig1coef,x[q1[0]])[qwave]/expospec1) # estimate # prepare for output
'''
import matplotlib.pyplot as plt
fig = plt.figure()
plt.plot(dis,sprate*expospec1*fcoi(wave),'r-')
plt.plot(dis,bkg*aper1corr*senscorr*bgcoi(wave),'b--')
plt.plot(dis,bg1rate*bgcoi(wave)*expospec1*aper1corr,'g--')
plt.plot(dis,back2rate*expospec1*senscorr*bgcoi(wave)*aper1corr,'g-.')
plt.plot(dis,back1rate*expospec1*senscorr*bgcoi(wave)*aper1corr,'g:')
plt.show()
'''
xspec_quality = quality
xspec_quality[xspec_quality > 1] = np.array(np.log2(xspec_quality[xspec_quality > 1]),dtype=int)
# extname_order
if fileversion == 1:
spectrum_first = (channel,
counts[rc],
(np.sqrt(counts*aper1corr))[rc],
xspec_quality[rc],
aper1corr[rc],
expospec1[rc], )
back_first = (channel, bkg[rc], 0.1*np.sqrt(bkg)[rc],
xspec_quality[rc],aper1corr[rc], expospec1[rc])
calspec_first = (dis,wave,
sprate*fcoi(wave), # fully corrected counts in the spectrum
bg1rate*aper1corr*bgcoi(wave)*trackwidth/2.5, # fully corrected counts in the spectrum
back1rate*bgcoi(wave)*senscorr*aper1corr, # fully corrected counts in the spectrum
back2rate*bgcoi(wave)*senscorr*aper1corr, # fully corrected counts in the spectrum
flux, flux_err, # fully corrected flux
quality,
aper1corr,
expospec1,
fcoi(wave),
bgcoi(wave),
senscorr,
binwidth,
specresp1func(wave),
spwidup1,spwiddown1,
angle,)
elif fileversion == 2:
spectrum_first = (channel,
sprate[rc]*fcoi(wave[rc]),
sp1rate_err[rc]*fcoi(wave[rc]),
xspec_quality[rc] )
back_first = (channel, bg1rate[rc], 0.1*np.sqrt(bg1rate)[rc],
xspec_quality[rc],aper1corr[rc], expospec1[rc])
if not calibration_mode:
calspec_first = (dis,wave,
(counts-bkg)/expospec1, bkg/expospec1, # uncorrected counts in the spectrum
[back1rate,back2rate],
flux, flux_err, # fully corrected flux
quality,
aper1corr,
expospec1,
fcoi(wave),
bgcoi(wave))
else:
calspec_first = (dis,wave,
(counts-bkg)/expospec1, bkg/expospec1, # uncorrected counts in the spectrum
[back1rate,back2rate],
flux, flux_err, # fully corrected flux
quality,
aper1corr,
expospec1,
fcoi(wave),
bgcoi(wave),
co_sp1rate, co_bgrate
)
############### second order
# predicted second order
# already rate in array
# measured second order
if present2 & fit_second:
wave2 = polyval(C_2, x[q2[0]]-dist12) # 2nd order wavelengths
senscorr2 = sensitivityCorrection(hdr['tstart'],wave=wave2,wheelpos=wheelpos)# just basic 1%/yr
channel2 = np.arange(len(wave2)) +1
rc2 = list(range(len(wave2)))
rc2.reverse()
sp2counts = sp_second[q2[0]] # total counts in second order
aper2corr = apercorr[2, q2[0]].flatten() # aperture correction second order
expospec2 = expospec[2, q2[0]].flatten()
bg_2cnt = bg_second[q2[0]] # background counts second order
sp2netcnt = sp2counts - bg_2cnt # net counts, uncorrected
sp2rate = sp2netcnt * aper2corr / expospec2 # net count rate, aperture corrected
sp2rate_err = np.sqrt(
np.abs(
(bg_2cnt*2.5/trackwidth+sp2netcnt*aper2corr)*
senscorr2*(1+
apcorr_errf(trackwidth,wheelpos) )))/expospec2 # fully corrected error in net count rate
bg_2rate = bg_2cnt /expospec2 * 2.5/trackwidth # aperture corrected background rate
qual2 = qquality[q2[0]] # quality at second order (shared with first order)
dis2 = uvotgetspec.pix_from_wave( C_2, wave2,spectralorder=2 )
binwidth2 = np.polyval(C_2,dis2+0.5) - np.polyval(C_2,dis2-0.5)
pix2 = x[q2[0]] # pixel number to align with first order
fcoi_2, coi_valid2 = uvotgetspec.coi_func(pix2,wave2,
(co_second[q2[0]]/expospec2).flatten(),
(co_back [q2[0]]/expospec2).flatten(),
area = 414.,
wheelpos=wheelpos,
coi_length=29,
frametime=hdr['framtime'],
background=False,
debug=False,chatter=1)
bgcoi2 = uvotgetspec.coi_func(pix2,wave2,
(co_second[q2[0]]/expospec2).flatten(),
(co_back[q2[0]]/expospec2).flatten(),
area = 414.,
wheelpos=wheelpos,
coi_length=29,
frametime=hdr['framtime'],
background=True,
debug=False,chatter=1)
if len(coi_valid2) == len(qual2):
qual2[coi_valid2 == False] = qflags["too_bright"]
else:
raise RuntimeError("the qual2 and coi_valid2 arrays are of different length")
sp2rate = sp2rate * senscorr2 # perform sensitivity loss correction
bg_2rate = bg_2rate * senscorr2
#specresp1func = SpecResp(hdr['wheelpos'], 1, arf1=arf1, arf2=arf2 )
if arf2 != None:
specresp2func = SpecResp(hdr['wheelpos'], 2, arf1=arf1, arf2=arf2 )
else:
# position dependent response function
specresp2func = XYSpecResp(wheelpos=hdr['wheelpos'],spectralorder=2, anker=anker2 ) #~~TODO:
hnu2 = h_c_ang/(wave2)
flux2 = hnu2*sp2rate*fcoi_2(wave2)/specresp2func(wave2)/binwidth2 # corrected [erg/s/cm2/angstrom]
flux2_err = hnu2*sp2rate_err*fcoi_2(wave2)/specresp2func(wave2)/binwidth2
xspec_qual2 = qual2
xspec_qual2[xspec_qual2 > 1] = np.array(np.log2(xspec_qual2[xspec_qual2 > 1]),dtype=int)
# collect data for output
if fileversion == 1:
sp2counts[np.where(sp2counts<0)] = 0 #~TODO:
spectrum_second = (channel2, sp2counts[rc2],
(np.sqrt(sp2counts))[rc2], xspec_qual2[rc2],
aper2corr[rc2], expospec2[rc2] )
back_second = (channel2, bg_2cnt[rc2],
0.1*(np.sqrt(bg_2cnt))[rc2],
xspec_qual2[rc2], aper2corr[rc2],expospec2[rc2] )
calspec_second = (pix2,wave2,
sp2rate*fcoi_2(wave2),
bg_2rate*fcoi_2(wave2),
flux2,flux2_err,
qual2,aper2corr,
expospec2,fcoi_2(wave2),bgcoi2(wave2),
senscorr2,binwidth2,specresp2func(wave2))
else:
spectrum_second = (channel2,
sp2rate[rc2]*fcoi_2(wave2[rc2]),
sp2rate_err[rc2]*fcoi_2(wave2[rc2]),
qual2[rc2])
back_second = (channel2, bg_2rate[rc2]*bgcoi2(wave2[rc2]),
0.1*(np.sqrt(bg_2cnt))[rc2],
qual2[rc2] )
calspec_second = (pix2,wave2,
(sp2counts-bg_2cnt)/expospec2,
bg_2cnt/expospec2,
flux2,flux2_err,
qual2,aper2corr,
expospec2,fcoi_2(wave2),bgcoi2(wave2))
else:
spectrum_second = None
back_second = None
calspec_second = None
if hdr['wheelpos'] > 500: present2 = False
############### FILES Define the input/ output file names
obsid = filestub
if fileoutstub != None:
obsid = fileoutstub
if chatter > 2: print("output file name base is now:",obsid)
if used_lenticular:
lent='_f'
else:
lent='_g'
if hdr['wheelpos'] == 200:
outfile1 = obsid+'ugu_1ord_'+str(ext)+lent+'.pha'
backfile1 = obsid+'ugu_1ord_'+str(ext)+lent+'_back.pha'
outfile2 = obsid+'ugu_2ord_'+str(ext)+lent+'.pha'
backfile2 = obsid+'ugu_2ord_'+str(ext)+lent+'_back.pha'
rmffile1 = obsid+'ugu_1ord_'+str(ext)+lent+'.rmf'
rmffile2 = obsid+'ugu_2ord_'+str(ext)+lent+'.rmf'
elif hdr['wheelpos'] == 160:
outfile1 = obsid+'ugu_1ord_'+str(ext)+lent+'.pha'
backfile1 = obsid+'ugu_1ord_'+str(ext)+lent+'_back.pha'
outfile2 = obsid+'ugu_2ord_'+str(ext)+lent+'.pha'
backfile2 = obsid+'ugu_2ord_'+str(ext)+lent+'_back.pha'
rmffile1 = obsid+'ugu_1ord_'+str(ext)+lent+'.rmf'
rmffile2 = obsid+'ugu_2ord_'+str(ext)+lent+'.rmf'
elif hdr['wheelpos'] == 1000:
outfile1 = obsid+'ugv_1ord_'+str(ext)+lent+'.pha'
backfile1 = obsid+'ugv_1ord_'+str(ext)+lent+'_back.pha'
outfile2 = obsid+'ugv_2ord_'+str(ext)+lent+'.pha'
backfile2 = obsid+'ugv_2ord_'+str(ext)+lent+'_back.pha'
rmffile1 = obsid+'ugv_1ord_'+str(ext)+lent+'.rmf'
rmffile2 = obsid+'ugv_2ord_'+str(ext)+lent+'.rmf'
elif hdr['wheelpos'] == 955:
outfile1 = obsid+'ugv_1ord_'+str(ext)+lent+'.pha'
backfile1 = obsid+'ugv_1ord_'+str(ext)+lent+'_back.pha'
outfile2 = obsid+'ugv_2ord_'+str(ext)+lent+'.pha'
backfile2 = obsid+'ugv_2ord_'+str(ext)+lent+'_back.pha'
rmffile1 = obsid+'ugv_1ord_'+str(ext)+lent+'.rmf'
rmffile2 = obsid+'ugv_2ord_'+str(ext)+lent+'.rmf'
else:
print("FATAL: exposure header does not have filterwheel position encoded")
return
obsid = filestub
# test for presence of outfile and clobber not set
if clobber == False:
if (os.access(outfile1,os.F_OK) ^ os.access(backfile1,os.F_OK)
^ os.access(outfile2,os.F_OK) ^ os.access(backfile2,os.F_OK)):
print('Error: output file already present. ')
if write_rmffile & (not os.access(rmffile1,os.F_OK)):
write_rmf_file (rmffile1, wave, hdr['wheelpos'], C_1,
anchor=anker, clobber=clobber,chatter=chatter )
if present2 & fit_second & write_rmffile & (not os.access(rmffile2,os.F_OK)):
# write_rmf_file (rmffile2, wave2, hdr['wheelpos'],2, C_2,
# arf1=None, arf2=arf2, clobber=clobber,chatter=chatter )
print("no RMF file for second order available")
if interactive:
answer = input(' DO YOU WANT TO REWRITE THE OUTPUT FILES (answer yes/NO)? ')
if len(answer) < 1: answer = 'NO'
answer = answer.upper()
if (answer == 'Y') ^ (answer == 'YES'): clobber = True
if clobber == False: return
filetag = obsid+'_'+str(hdr['wheelpos'])+str(rnu)
writeSpectrum_ (ra,dec,obsid,ext,hdr,anker,phx,phy,offset,
ank_c, exposure, history, spectrum_first, back_first,
calspec_first, extimg, outfile1, backfile1, rmffile1,
outfile2=outfile2, backfile2=backfile2, rmffile2=rmffile2,
expmap=expmap, spectrum_second = spectrum_second,
back_second = back_second, calspec_second=calspec_second,
present2=(present2 & fit_second), fileversion=fileversion,
zeroxy=zeroxy, calmode=calibration_mode,
clobber=clobber, chatter=chatter)
if write_rmffile:
write_rmf_file (rmffile1, wave, hdr['wheelpos'], C_1, effarea1=effarea1,
anchor=anker, clobber=clobber,chatter=chatter )
if present2 & fit_second & write_rmffile:
#write_rmf_file (rmffile2, wave2, hdr['wheelpos'],2, C_2,
# arf1=None, arf2=arf2, clobber=clobber,chatter=chatter )
print("no RMF file for second order available")
def apcorr_errf(trackwidth,wheelpos):
    """Return the additional RMS percentage rate error introduced by the
    aperture correction.

    Parameters
    ----------
    trackwidth : float
        half-width of the extraction track in units of the gaussian sigma
        of the spectrum cross-dispersion profile (nominal value 2.5)
    wheelpos : int
        filter wheel position; values below 500 select the UV grism
        calibration, otherwise the visible grism calibration is used

    Returns
    -------
    float
        percentage RMS error to add to the rate error
    """
    excess = trackwidth - 2.5
    # calibration constants differ per grism: (small-excess error, slope)
    base, slope = (5.0, 3.0) if wheelpos < 500 else (8.0, 5.0)
    if excess < 1.1:
        return base
    if excess < 2.0:
        return slope
    # beyond excess of 2 sigma the error grows linearly with the excess
    return slope * excess
def writeSpectrum_ (ra,dec,obsid,ext,hdr,anker,phx,phy,offset, ank_c, exposure,
history, spectrum_first, back_first, calspec_first, extimg, outfile1,
backfile1, rmffile1, outfile2=None, backfile2=None, rmffile2=None, expmap=None,
spectrum_second = None, back_second = None, calspec_second=None, present2=True,
fileversion = 1, zeroxy=[1000,1000], calmode=False,
clobber=False, chatter=0 ):
'''performs the actual creation of the output file (mostly FITS stuff)
See writeSpectrum() for the parameter meanings
'''
try:
from astropy.io import fits
except:
import pyfits as fits
import datetime
import numpy as N
from scipy import interpolate
import os
from pylab import polyval
from uvotgetspec import get_coi_box
version = '140723'
now = datetime.date.today()
datestring = now.isoformat()[0:4]+now.isoformat()[5:7]+now.isoformat()[8:10]
rnu = int(now.day*1.2+now.month*0.99+now.year*0.3) # some number
# coincidence loss box
coi_half_width,coi_length,coifactor = get_coi_box(hdr['wheelpos'])
orders = '1'
if present2: orders='12'
outfile2nd = outfile2
backfile2nd = backfile2
filetag = obsid+'_'+str(ext)+'_'+str(rnu)
#
# ============= main spectrum pha file =======
#
if chatter>4: print("uvotio: write main header")
# create primary header
#
hdu0 = fits.PrimaryHDU()
hdu0.header['CREATED']=('written by uvotio.py '+version)
hdu0.header['DATE']=(str(now))
hdu0.header['AUTHOR']=('UVOTPY author is NPM Kuin (UCL/MSSL)')
hdu0.header['WHEELPOS']=(hdr['wheelpos'])
hdu0.header['FILTER']=(hdr['filter'],'UVOT filter used')
hdu0.header['ORDERS']=(orders,'list of sp. orders included')
hdu0.header['TELESCOP']=('SWIFT ','Telescope (mission) name')
hdu0.header['INSTRUME']=('UVOTA ','Instrument Name')
hdu0.header['FILEVERS']=(fileversion,'UVOTPY file version')
hdu0.header['CAL_REF']=('2015MNRAS.449.2514K','CDS Bibcode grism calibration')
hdu0.header['ORI_FILE']=(obsid+'+'+str(ext),'fileid and extension of extracted spectrum')
hdu0.header['RA_X']=(ra,'RA of source extracted spectrum')
hdu0.header['DEC_X']=(dec,'DEC of source extracted spectrum')
hdu0.header['DETX_X']=(anker[0],'XDET position source anker in DET coord')
hdu0.header['DETY_X']=(anker[1],'YDET position source anker in DET coord')
hdu0.header['POSX_AS']=(phx,'angle boresight in deg in DETX direction')
hdu0.header['POSY_AS']=(phy,'angle boresight in deg in DETY direction')
hdu0.header['SPEC_OFF']=(offset,'distance to spectrum from anker position (DETX_X,DETY_X)')
hdu0.header['ANGLE']=(calspec_first[-1],'rotation-angle used to extract extimg')#~~TODO:
hdulist=fits.HDUList([hdu0])
#
# Main fits part
#
hdr0 = hdr.copy()
hdr0['ORI_FILE']=(obsid+'+'+str(ext),'fileid and extension of extracted spectrum')
hdr0['RA_X']=(ra,'RA of source extracted spectrum')
hdr0['DEC_X']=(dec,'DEC of source extracted spectrum')
hdr0['DETX_X']=(anker[0],'XDET position source anker in DET coord')
hdr0['DETY_X']=(anker[1],'YDET position source anker in DET coord')
hdr0['POSX_AS']=(phx,'angle boresight in deg in DETX direction')
hdr0['POSY_AS']=(phy,'angle boresight in deg in DETY direction')
hdr0['SPEC_OFF']=(offset,'distance to spectrum from anker position (DETX_X,DETY_X)')
hdr0['ANGLE']=(calspec_first[-1],'rotation-angle used to extract extimg')#~~TODO:
#
if chatter>4: print("uvotio: write first header")
# first extension: first order spectrum ; add extname everywhere
#
if fileversion == 1:
col11 = fits.Column(name='CHANNEL ',format='I',array=spectrum_first[0])
col12 = fits.Column(name='COUNTS ',format='I',array=spectrum_first[1],unit='COUNTS')
col13 = fits.Column(name='STAT_ERR',format='E',array=spectrum_first[2],unit='COUNTS')
col14 = fits.Column(name='QUALITY ',format='I',array=spectrum_first[3] )
col15 = fits.Column(name='APERCORR',format='E',array=spectrum_first[4] )
col16 = fits.Column(name='EXPOSURE',format='E',array=spectrum_first[5],unit='s' )
# TODO:
cols1 = fits.ColDefs([col11,col12,col13,col14,col15,col16])
elif fileversion == 2:
col11 = fits.Column(name='CHANNEL ',format='I',array=spectrum_first[0])
col12 = fits.Column(name='RATE ',format='E',array=spectrum_first[1],unit='counts/s')
col13 = fits.Column(name='STAT_ERR',format='E',array=spectrum_first[2],unit='counts/s')
#col14 = fits.Column(name='SYS_ERR',format='E',array=spectrum_first[2],unit='counts/s')
col15 = fits.Column(name='QUALITY ',format='I',array=spectrum_first[3] )
cols1 = fits.ColDefs([col11,col12,col13,col15])
tbhdu1 = fits.BinTableHDU.from_columns(cols1)
if fileversion == 1:
tbhdu1.header['comment']='COUNTS are observed, uncorrected counts'
elif fileversion == 2:
tbhdu1.header['comment']='RATE are the fully corrected count rates'
try:
tbhdu1.header['EXPID']=(hdr['expid'],'Exposure ID')
except:
pass
tbhdu1.header['EXTNAME']=('SPECTRUM','Name of this binary table extension')
tbhdu1.header['TELESCOP']=('SWIFT ','Telescope (mission) name')
tbhdu1.header['INSTRUME']=('UVOTA ','Instrument Name')
tbhdu1.header['TIMESYS ']=(hdr['timesys'],'time system')
tbhdu1.header['FILETAG']=(filetag,'unique set id')
tbhdu1.header['MJDREFI ']=(hdr['mjdrefi'],'Reference MJD time integer part')
tbhdu1.header['MJDREFF ']=(hdr['mjdreff'],'Reference MJD fractional part')
tbhdu1.header['TIMEREF ']=(hdr['timeref'],'time reference barycentric/local')
tbhdu1.header['TASSIGN ']=(hdr['tassign'],'time assigned by clock')
tbhdu1.header['TIMEUNIT']=(hdr['timeunit'])
tbhdu1.header['TIERRELA']=(hdr['TIERRELA'],'time relative error [s/s]')
tbhdu1.header['TIERABSO']=(hdr['TIERABSO'],'timing precision in seconds')
tbhdu1.header['TSTART']=(hdr['TSTART'])
tbhdu1.header['TSTOP']=(hdr['TSTOP'])
tbhdu1.header['COMMENT']="Note that all Swift times are not clock corrected by definition"
tbhdu1.header['DATE-OBS']=hdr['DATE-OBS']
tbhdu1.header['DATE-END']=hdr['DATE-END']
tbhdu1.header['CLOCKAPP']=(hdr['CLOCKAPP'],'if clock correction was applied')
tbhdu1.header['TELAPSE']=(hdr['TELAPSE'],'Tstop - Tstart')
tbhdu1.header['EXPOSURE']=(hdr['EXPOSURE'],'Average Total exposure, with all known corrections')
tbhdu1.header['DEADC']=(hdr['DEADC'],'dead time correction')
tbhdu1.header['FRAMTIME']=(hdr['FRAMTIME'],'frame exposure time')
tbhdu1.header['DETNAM']=hdr['DETNAM']
tbhdu1.header['FILTER']=hdr['FILTER']
tbhdu1.header['OBS_ID']=(hdr['OBS_ID'],'observation id')
tbhdu1.header['TARG_ID']=(hdr['TARG_ID'],'Target ID')
if 'SEQ_NUM' in hdr: tbhdu1.header['SEQ_NUM']=hdr['SEQ_NUM']
tbhdu1.header['EQUINOX']=hdr['EQUINOX']
tbhdu1.header['RADECSYS']=hdr['RADECSYS']
tbhdu1.header['WHEELPOS']=(hdr['WHEELPOS'],'filterweel position')
tbhdu1.header['SPECTORD']=(1,'spectral order')
try:
tbhdu1.header['BLOCLOSS']=(hdr['BLOCLOSS'],'[s] Exposure time under BLOCKED filter')
tbhdu1.header['STALLOSS']=(hdr['STALLOSS'],'[s] Est DPU stalling time loss')
tbhdu1.header['TOSSLOSS']=(hdr['TOSSLOSS'],'[s] Est Shift&Toss time loss')
tbhdu1.header['MOD8CORR']=(hdr['MOD8CORR'],'Was MOD8 correction applied')
tbhdu1.header['FLATCORR']=(hdr['FLATCORR'],'was flat field correction applied')
except:
pass
tbhdu1.header['ASPCORR']=(hdr['ASPCORR'],'Aspect correction method')
tbhdu1.header['HDUCLASS']=('OGIP','format attemts to follow OGIP standard')
tbhdu1.header['HDUCLAS1']=('SPECTRUM','PHA dataset (OGIP memo OGIP-92-007')
if fileversion == 1:
tbhdu1.header['HDUCLAS2']=('TOTAL','Gross PHA Spectrum (source + background)')
tbhdu1.header['HDUCLAS3']=('COUNT','PHA data stored as counts (not count/s)')
tbhdu1.header['POISSERR']=('F','Poissonian errors not applicable')
tbhdu1.header['BACKFILE']=(backfile1,'Background FITS file')
elif fileversion == 2:
tbhdu1.header['HDUCLAS2']=('NET','Gross PHA Spectrum (source only)')
tbhdu1.header['HDUCLAS3']=('RATE','PHA data stored as rate (not count)')
tbhdu1.header['POISSERR']=('F','Poissonian errors not applicable')
tbhdu1.header['HDUVERS1']=('1.1.0','Version of format (OGIP memo OGIP-92-007a)')
tbhdu1.header['CHANTYPE']=('PHA','Type of channel PHA/PI')
tbhdu1.header['TLMIN1']=(1,'Lowest legal channel number')
tbhdu1.header['TLMAX1']=(len(spectrum_first[0]),'Highest legal channel number')
tbhdu1.header['GROUPING']=(0,'No grouping of the data has been defined')
tbhdu1.header['DETCHANS']=(len(spectrum_first[0]),'Total number of detector channels available')
tbhdu1.header['AREASCAL']=(1,'Area scaling factor')
tbhdu1.header['BACKSCAL']=(1,'Background scaling factor')
tbhdu1.header['CORRSCAL']=(1,'Correlation scaling factor')
tbhdu1.header['BACKFILE']=('NONE','Background FITS file')
tbhdu1.header['CORRFILE']=('NONE ','Correlation FITS file')
tbhdu1.header['RESPFILE']=('NONE','Redistribution matrix')
tbhdu1.header['ANCRFILE']=('NONE ','Ancillary response')
tbhdu1.header['XFLT0001']=('NONE ','XSPEC selection filter description')
tbhdu1.header['CRPIX1']=('(1,'+str(len(spectrum_first[0]))+')','Channel binning of the CHANNEL column')
tbhdu1.header['PHAVERSN']=('1992a ','OGIP memo number for file format')
# convert ra,dec -> zerodetx,zerodety using uvotapplywcs?
# look in code uvotimgrism
tbhdu1.header['ZERODETX']=(zeroxy[0],'zeroth order position on image')
tbhdu1.header['ZERODETY']=(zeroxy[1],'zeroth order position on image')
tbhdu1.header['CAL_REF']=('2015MNRAS.449.2514K','CDS Bibcode grism calibration')
hdulist.append(tbhdu1)
#
if chatter>4: print("uvotio: write second header")
# second extension first order
#
if fileversion == 2:
if calmode:
(dis,wave,sprate,bg1rate,bck_strips,flux,flux_err,
quality,aper1corr,expospec1,coi_sp1,bgcoi_sp1,
co_sp1rate,co_bgrate) = calspec_first
else:
(dis,wave,sprate,bg1rate,bck_strips,flux,flux_err,
quality,aper1corr,expospec1,coi_sp1,bgcoi_sp1) = calspec_first
col23 = fits.Column(name='BKGRATE1',format='E',array=bg1rate,unit='c/s')
col24A = fits.Column(name='BG_L ',format='E',array=bck_strips[0],unit='c/s')
col24B = fits.Column(name='BG_R ',format='E',array=bck_strips[1],unit='c/s')
elif fileversion == 1:
(dis,wave,
sprate, # fully corrected counts in the spectrum
bg_rate,
bg1rate, # fully corrected background counts under the spectrum
bg2rate,
flux, flux_err, # fully corrected flux
quality,
aper1corr,
expospec1,
coi_sp1,
bgcoi_sp1,
senscorr,
binwidth,
effarea1,
spwidup1,spwiddown1,
angle, ) = calspec_first
col23 = fits.Column(name='BGRATE1 ',format='E',array=bg_rate,unit='c/s')
col24A = fits.Column(name='BG_L ',format='E',array=bg1rate,unit='c/s')
col24B = fits.Column(name='BG_R ',format='E',array=bg2rate,unit='c/s')
col20 = fits.Column(name='PIXNO ',format='I',array=dis ,unit='pix')
col21 = fits.Column(name='LAMBDA ',format='E',array=wave ,unit='A')
col22 = fits.Column(name='NETRATE',format='E',array=sprate ,unit='c/s')
col25 = fits.Column(name='FLUX ',format='E',array=flux ,unit='erg cm-2 s-1 A-1')
col26 = fits.Column(name='FLUXERR',format='E',array=flux_err,unit='erg cm-2 s-1 A-1')
col27 = fits.Column(name='QUALITY',format='I',array=quality,unit='NONE')
col28 = fits.Column(name='APERCOR1',format='E',array=aper1corr,unit='NONE')
col29 = fits.Column(name='EXPOSURE',format='E',array=expospec1,unit='s')
col29A = fits.Column(name='SP1_COIF',format='E',array=coi_sp1,unit='NONE')
col29B = fits.Column(name='BG1_COIF',format='E',array=bgcoi_sp1,unit='NONE')
col29C = fits.Column(name='SENSCORR',format='E',array=senscorr,unit='NONE')
col29D = fits.Column(name='BINWIDTH',format='E',array=binwidth,unit='NONE')
col29E = fits.Column(name='EFFAREA1',format='E',array=effarea1,unit='NONE')
col29F = fits.Column(name='WIDTH_UP',format='E',array=spwidup1,unit='pixel')
col29G = fits.Column(name='WIDTH_DW',format='E',array=spwiddown1,unit='pixel')
if present2: # second order
(pix2,wave2,sp2rate,bg_2rate,flux2,flux2_err,qual2,aper2corr,expospec2,
coi_sp2, bgcoi_sp2, senscorr2, binwidth2, effarea2) = calspec_second
col30 = fits.Column(name='PIXNO2',format='I',array=pix2,unit='pix')
col31 = fits.Column(name='LAMBDA2',format='E',array=wave2,unit='A')
col32 = fits.Column(name='NETRATE2',format='E',array=sp2rate,unit='c/s')
col33 = fits.Column(name='BGRATE2',format='E',array=bg_2rate,unit='c/s')
col34 = fits.Column(name='FLUX2',format='E',array=flux2,unit='erg cm-2 s-1 A-1')
col35 = fits.Column(name='FLUXERR2',format='E',array=flux2_err,unit='erg cm-2 s-1 A-1')
col36 = fits.Column(name='QUALITY2',format='I',array=qual2,unit='NONE')
col37 = fits.Column(name='APERCOR2',format='E',array=aper2corr,unit='NONE')
col38 = fits.Column(name='EXPOSURE2',format='E',array=expospec2,unit='s')
col38A = fits.Column(name='SP2_COI',format='E',array=coi_sp2,unit='NONE')
col38B = fits.Column(name='BG2_COI',format='E',array=bgcoi_sp2,unit='NONE')
col38C = fits.Column(name='SENSCOR2',format='E',array=senscorr2,unit='NONE')
col38D = fits.Column(name='BINWIDT2',format='E',array=binwidth2,unit='NONE')
col38E = fits.Column(name='EFFAREA2',format='E',array=effarea2,unit='NONE')
if fileversion == 1:
cols2 = fits.ColDefs([col20,col21,col22,col23,col24A,col24B,col25,col26,col27,col28,
col29,col29A,col29B,col29C,col29D,col29E,col29F,col29G,col30,col31,col32,col33,col34,col35,col36,
col37,col38,col38A,col38C,col38D,col38E])
elif fileversion == 2:
if calmode:
colcoA =fits.Column(name='COSP1RAT',format='E',array=co_sp1rate,unit='c/s')
colcoB =fits.Column(name='COBG1RAT',format='E',array=co_bgrate,unit='c/s')
cols2 = fits.ColDefs([col20,col21,col22,col23,col25,col26,col27,col28,
col29,col29A,col29B,col30,col31,col32,col33,col34,col35,col36,
col37,col38A,col38B,col24A,col24B,colcoA,colcoB])
else:
cols2 = fits.ColDefs([col20,col21,col22,col23,col25,col26,col27,col28,
col29,col29A,col29B,col30,col31,col32,col33,col34,col35,col36,
col37,col38A,col38B,col24A,col24B])
else: # not present2
if fileversion == 1:
cols2 = fits.ColDefs([col20,col21,col22,col23,col24A,col24B,col25,col26,col27,
col28,col29,col29A,col29B,col29C,col29D,col29E,col29F,col29G])
elif fileversion == 2:
if calmode:
colcoA =fits.Column(name='COSP1RAT',format='E',array=co_sp1rate,unit='c/s')
colcoB =fits.Column(name='COBG1RAT',format='E',array=co_bgrate,unit='c/s')
cols2 = fits.ColDefs([col20,col21,col22,col23,col25,col26,col27,
col28,col29,col29A,col29B,col24A,col24B,colcoA,colcoB])
else:
cols2 = fits.ColDefs([col20,col21,col22,col23,col25,col26,col27,
col28,col29,col29A,col29B,col24A,col24B])
tbhdu2 = fits.BinTableHDU.from_columns(cols2)
if fileversion == 1:
tbhdu2.header['HISTORY']='coi-loss, aperture - corrected flux and rates'
elif fileversion == 2:
tbhdu2.header['HISTORY']='no coi-loss, no aperture - uncorrected rates'
tbhdu2.header['HISTORY']='coi-loss, aperture - corrected flux and rates'
if history != None:
msg1 = history.split('\n')
for msg in msg1: tbhdu1.header.add_history(msg)
try:
tbhdu2.header['EXPID']=hdr['expid']
except:
pass
# tbhdu2.header.update('EXTNAME','FIRST_ORDER_NET_SPECTRUM')
tbhdu2.header['EXTNAME']='CALSPEC'
tbhdu2.header['FILETAG']=(filetag,'unique set id')
tbhdu2.header['TELESCOP']=('SWIFT ','Telescope (mission) name')
tbhdu2.header['INSTRUME']=('UVOTA ','Instrument Name')
tbhdu2.header['CAL_REF']=('2015MNRAS.449.2514K','CDS Bibcode grism calibration')
tbhdu2.header['TIMESYS ']=hdr['timesys']
tbhdu2.header['MJDREFI ']=hdr['mjdrefi']
tbhdu2.header['MJDREFF ']=hdr['mjdreff']
tbhdu2.header['TIMEREF ']=hdr['timeref']
tbhdu2.header['TASSIGN ']=hdr['tassign']
tbhdu2.header['TIMEUNIT']=hdr['timeunit']
tbhdu2.header['TIERRELA']=hdr['TIERRELA']
tbhdu2.header['TIERABSO']=hdr['TIERABSO']
tbhdu2.header['COMMENT']="Note that all Swift times are not clock corrected by definition"
tbhdu2.header['TSTART']=hdr['TSTART']
tbhdu2.header['TSTOP']=hdr['TSTOP']
tbhdu2.header['DATE-OBS']=hdr['DATE-OBS']
tbhdu2.header['DATE-END']=hdr['DATE-END']
tbhdu2.header['CLOCKAPP']=hdr['CLOCKAPP']
tbhdu2.header['TELAPSE']=hdr['TELAPSE']
tbhdu2.header['EXPOSURE']=hdr['EXPOSURE']
tbhdu2.header['DEADC']=hdr['DEADC']
tbhdu2.header['FRAMTIME']=hdr['FRAMTIME']
tbhdu2.header['DETNAM']=hdr['DETNAM']
tbhdu2.header['FILTER']=hdr['FILTER']
tbhdu2.header['OBS_ID']=hdr['OBS_ID']
tbhdu2.header['TARG_ID']=hdr['TARG_ID']
tbhdu2.header['EQUINOX']=hdr['EQUINOX']
tbhdu2.header['RADECSYS']=hdr['RADECSYS']
tbhdu2.header['WHEELPOS']=hdr['WHEELPOS']
try:
tbhdu2.header['BLOCLOSS']=hdr['BLOCLOSS']
tbhdu2.header['MOD8CORR']=hdr['MOD8CORR']
tbhdu2.header['FLATCORR']=hdr['FLATCORR']
tbhdu2.header['STALLOSS']=hdr['STALLOSS']
tbhdu2.header['TOSSLOSS']=hdr['TOSSLOSS']
except:
print("WARNING problem found in uvotio line 2440 try update XXXXLOSS keywords")
pass
if calmode:
tbhdu2.header['COIWIDTH'] = 2*coi_half_width
tbhdu2.header['HDUCLASS']='OGIP'
tbhdu2.header['HDUCLAS1']='SPECTRUM'
if fileversion == 1:
tbhdu2.header['HDUCLAS2']='TOTAL'
elif fileversion == 2:
tbhdu2.header['HDUCLAS2']='NET'
tbhdu2.header['ZERODETX']=(zeroxy[0],'zeroth order position on image')
tbhdu2.header['ZERODETY']=(zeroxy[1],'zeroth order position on image')
hdulist.append(tbhdu2)
#
if chatter>4: print("uvotio: write third header")
# THIRD extension: extracted image
#
hdu3 = fits.ImageHDU(extimg)
hdu3.header['EXTNAME']='SPECTRUM_IMAGE'
try:
hdu3.header['EXPID']=hdr['expid']
except:
pass
hdu3.header['ANKXIMG']=(ank_c[1],'Position anchor in image')
hdu3.header['ANKYIMG']=(ank_c[0],'Position anchor in image')
hdu3.header['FILETAG']=(filetag,'unique set id')
hdulist.append(hdu3)
#
# FOURTH extension: extracted image
#
if len(expmap) > 1:
hdu4 = fits.ImageHDU(expmap)
hdu4.header['EXTNAME']='EXPOSURE_MAP'
try:
hdu4.header['EXPID']=hdr['expid']
except:
pass
hdu4.header['ANKXIMG']=(ank_c[1],'Position anchor in image')
hdu4.header['ANKYIMG']=(ank_c[0],'Position anchor in image')
hdu4.header['FILETAG']=(filetag,'unique set id')
hdulist.append(hdu4)
try:
hdulist.writeto(outfile1,clobber=clobber)
except:
print("WARNING : NO OUTPUT FILE CREATED. "+outfile1+" EXISTS and CLOBBER not set")
pass
#
#
################ ============= second order spectrum pha file ======= ###############
if present2:
#
if chatter>4: print("uvotio: write 2nd order 0 header")
# create primary header
#
hdu0 = fits.PrimaryHDU()
hdu0.header['CREATED']='written by uvotio.py '+version
hdu0.header['DATE']=str(now)
hdu0.header['AUTHOR']='UVOTPY author is NPM Kuin (UCL/MSSL)'
hdu0.header['WHEELPOS']=hdr['wheelpos']
hdu0.header['FILTER']=(hdr['filter'],'UVOT filter used')
hdu0.header['TELESCOP']=('SWIFT ','Telescope (mission) name')
hdu0.header['INSTRUME']=('UVOTA ','Instrument Name')
hdu0.header['CAL_REF']=('2015MNRAS.449.2514K','CDS Bibcode grism calibration')
hdulist=fits.HDUList([hdu0])
#
# Main fits part
#
hdr0 = hdr.copy()
hdr0['ORI_FILE']=(obsid+'+'+str(ext),'fileid and extension of extracted spectrum')
hdr0['RA_X']=(ra,'RA of source extracted spectrum')
hdr0['DEC_X']=(dec,'Dec. of source extracted spectrum')
hdr0['DETX_X']=(anker[0],'XDET position source anker in DET coord')
hdr0['DETY_X']=(anker[1],'YDET position source anker in DET coord')
hdr0['POSX_AS']=(phx,'angle boresight in deg in DETX direction')
hdr0['POSY_AS']=(phy,'angle boresight in deg in DETY direction')
hdr0['SPEC_OFF']=(offset,'distance to spectrum from anker position (DETX_X,DETY_X)')
#
# first extension: first order spectrum ; add extname everywhere
#
if fileversion == 1:
(channel2, sp2counts, stat_err, qual2, aper2corr,expospec2 ) = spectrum_second
col21 = fits.Column(name='CHANNEL ',format='I',array=channel2 )
col22 = fits.Column(name='SP_COUNTS ',format='I',array=sp2counts,unit='COUNTS')
col23 = fits.Column(name='SP_STAT_ERR',format='E',array=stat_err,unit='COUNTS')
col24 = fits.Column(name='QUALITY ',format='I',array=qual2 )
col25 = fits.Column(name='APERCORR',format='E',array=aper2corr )
col26 = fits.Column(name='EXPOSURE',format='E',array=expospec2,unit='s' )
cols1 = fits.ColDefs([col21,col22,col23,col24,col25,col26])
elif fileversion == 2:
(channel2, sp2rate, rate_err, qual2) = spectrum_second
col21 = fits.Column(name='CHANNEL ',format='I',array=channel2 )
col22 = fits.Column(name='RATE ',format='E',array=sp2rate,unit='COUNTS/S')
col23 = fits.Column(name='STAT_ERR',format='E',array=rate_err,unit='COUNTS/S')
col24 = fits.Column(name='QUALITY ',format='I',array=qual2 )
cols1 = fits.ColDefs([col21,col22,col23,col24])
tbhdu1 = fits.BinTableHDU.from_columns(cols1)
try:
tbhdu1.header['EXPID']=(hdr['expid'],'Exposure ID')
except:
pass
if chatter>4: print("uvotio: write 2nd order 1 header")
tbhdu1.header['EXTNAME']=('SPECTRUM','Name of this binary table extension')
tbhdu1.header['TELESCOP']=('SWIFT ','Telescope (mission) name')
tbhdu1.header['INSTRUME']=('UVOTA ','Instrument Name')
tbhdu1.header['TIMESYS ']=(hdr['timesys'],'time system')
tbhdu1.header['FILETAG']=(filetag,'unique set id')
tbhdu1.header['MJDREFI ']=(hdr['mjdrefi'],'Reference MJD time integer part')
tbhdu1.header['MJDREFF ']=(hdr['mjdreff'],'Reference MJD fractional part')
tbhdu1.header['TIMEREF ']=(hdr['timeref'],'time reference barycentric/local')
tbhdu1.header['TASSIGN ']=(hdr['tassign'],'time assigned by clock')
tbhdu1.header['TIMEUNIT']=hdr['timeunit']
tbhdu1.header['TIERRELA']=(hdr['TIERRELA'],'time relative error [s/s]')
tbhdu1.header['TIERABSO']=(hdr['TIERABSO'],'timing precision in seconds')
tbhdu1.header['TSTART']=hdr['TSTART']
tbhdu1.header['TSTOP']=hdr['TSTOP']
tbhdu1.header['DATE-OBS']=hdr['DATE-OBS']
tbhdu1.header['DATE-END']=hdr['DATE-END']
tbhdu1.header['CLOCKAPP']=(hdr['CLOCKAPP'],'if clock correction was applied')
tbhdu1.header['TELAPSE']=(hdr['TELAPSE'],'Tstop - Tstart')
tbhdu1.header['EXPOSURE']=(hdr['EXPOSURE'],'Average Total exposure, with all known corrections')
tbhdu1.header['DEADC']=(hdr['DEADC'],'dead time correction')
tbhdu1.header['FRAMTIME']=(hdr['FRAMTIME'],'frame exposure time')
tbhdu1.header['DETNAM']=hdr['DETNAM']
tbhdu1.header['FILTER']=hdr['FILTER']
tbhdu1.header['OBS_ID']=(hdr['OBS_ID'],'observation id')
tbhdu1.header['TARG_ID']=(hdr['TARG_ID'],'Target ID image')
tbhdu1.header['EQUINOX']=hdr['EQUINOX']
tbhdu1.header['RADECSYS']=hdr['RADECSYS']
tbhdu1.header['WHEELPOS']=(hdr['WHEELPOS'],'filterweel position')
tbhdu1.header['SPECTORD']=(2,'spectral order')
try:
tbhdu1.header['BLOCLOSS']=(hdr['BLOCLOSS'],'[s] Exposure time under BLOCKED filter')
tbhdu1.header['STALLOSS']=(hdr['STALLOSS'],'[s] Est DPU stalling time loss')
tbhdu1.header['TOSSLOSS']=(hdr['TOSSLOSS'],'[s] Est Shift&Toss time loss')
tbhdu1.header['MOD8CORR']=(hdr['MOD8CORR'],'Was MOD8 correction applied')
tbhdu1.header['FLATCORR']=(hdr['FLATCORR'],'was LSS correction applied')
except:
pass
tbhdu1.header['ASPCORR']=(hdr['ASPCORR'],'Aspect correction method')
tbhdu1.header['HDUCLASS']=('OGIP','format attemts to follow OGIP standard')
tbhdu1.header['HDUCLAS1']=('SPECTRUM','PHA dataset (OGIP memo OGIP-92-007')
if fileversion == 1:
tbhdu1.header['HDUCLAS2']=('TOTAL','Gross PHA Spectrum (source + background)')
tbhdu1.header['HDUCLAS3']=('COUNT','PHA data stored as counts (not count/s)')
tbhdu1.header['BACKFILE']=(backfile2,'Background FITS file')
elif fileversion == 2:
tbhdu1.header['HDUCLAS2']=('NET','NET PHA Spectrum (background subtracted)')
tbhdu1.header['HDUCLAS3']=('RATE','PHA data stored as count/s')
tbhdu1.header['HDUVERS1']=('1.1.0','Version of format (OGIP memo OGIP-92-007a)')
tbhdu1.header['CHANTYPE']=('PHA','Type of channel PHA/PI')
tbhdu1.header['TLMIN1 ']=(1,'Lowest legal channel number')
tbhdu1.header['TLMAX1']=(len(channel2),'Highest legal channel number')
tbhdu1.header['POISSERR']=('F','Poissonian errors not applicable')
tbhdu1.header['GROUPING']=(0,'No grouping of the data has been defined')
tbhdu1.header['DETCHANS']=(len(channel2),'Total number of detector channels available')
tbhdu1.header['AREASCAL']=(1,'Area scaling factor')
tbhdu1.header['BACKSCAL']=(1,'Background scaling factor')
tbhdu1.header['CORRSCAL']=(1,'Correlation scaling factor')
tbhdu1.header['BACKFILE']=('NONE','Background FITS file')
tbhdu1.header['CORRFILE']=('NONE ','Correlation FITS file')
tbhdu1.header['RESPFILE']=('NONE','Redistribution matrix')
tbhdu1.header['ANCRFILE']=('NONE ','Ancillary response')
tbhdu1.header['XFLT0001']=('NONE ','XSPEC selection filter description')
tbhdu1.header['CRPIX1 ']=('(1,'+str(len(channel2))+')','Channel binning of the CHANNEL column')
tbhdu1.header['PHAVERSN']=('1992a ','OGIP memo number for file format')
tbhdu1.header['ZERODETX']=(zeroxy[0],'zeroth order position on image')
tbhdu1.header['ZERODETY']=(zeroxy[1],'zeroth order position on image')
tbhdu1.header['CAL_REF']=('2015MNRAS.449.2514K','CDS Bibcode grism calibration')
hdulist.append(tbhdu1)
try:
hdulist.writeto(outfile2nd,clobber=clobber)
except:
print("WARNING : NO OUTPUT FILE CREATED. "+outfile2nd+" EXISTS and CLOBBER not set")
pass
if fileversion == 1:
#
# ================= background PHA files ============================
#
if chatter>4: print("uvotio: write bkg 0 order header")
# first order background PHA file
#
# create primary header
#
hdu0 = fits.PrimaryHDU()
hdu0.header['CREATED']=('written by uvotio.py '+version)
hdu0.header['DATE']=str(now)
hdu0.header['AUTHOR']='NPM Kuin (UCL/MSSL)'
hdu0.header['WHEELPOS']=hdr['wheelpos']
hdu0.header['FILTER']=(hdr['filter'],'UVOT filter used')
hdu0.header['TELESCOP']=('SWIFT ','Telescope (mission) name')
hdu0.header['INSTRUME']=('UVOTA ','Instrument/Detector Name')
hdu0.header['CAL_REF']=('2015MNRAS.449.2514K','CDS Bibcode grism calibration')
hdulist=fits.HDUList([hdu0])
#
# Main fits part
#
hdr0 = hdr.copy()
hdr0['ORI_FILE']=(obsid+'+'+str(ext),'fileid and extension of extracted spectrum')
hdr0['RA_X']=(ra,'RA of source extracted spectrum')
hdr0['DEC_X']=(dec,'DEC of source extracted spectrum')
hdr0['DETX_X']=(anker[0],'XDET position source anker in DET coord')
hdr0['DETY_X']=(anker[1],'YDET position source anker in DET coord')
hdr0['POSX_AS']=(phx,'angle boresight in deg in DETX direction')
hdr0['POSY_AS']=(phy,'angle boresight in deg in DETY direction')
hdr0['SPEC_OFF']=(offset,'distance to spectrum from anker position (DETX_X,DETY_X)')
#
if chatter>4: print("uvotio: write bkg 1 order header")
# first extension: first order spectrum ; add extname everywhere
#
channel = back_first[0]
bgcounts = back_first[1]
bgstat_err = back_first[2]
bgquality = back_first[3]
channel, bgcounts, bgstat_err, bgquality, aper1corr, expospec1 = back_first
col11 = fits.Column(name='CHANNEL ',format='I',array=channel )
col12 = fits.Column(name='BKG_COUNTS ',format='I',array=bgcounts ,unit='COUNTS')
col13 = fits.Column(name='BKG_STAT_ERR',format='E',array=bgstat_err,unit='COUNTS')
col14 = fits.Column(name='QUALITY ',format='I',array=bgquality )
col15 = fits.Column(name='EXPOSURE',format='E',array=expospec1 ,unit='s' )
cols1 = fits.ColDefs([col11,col12,col13,col14,col15])
tbhdu1 = fits.BinTableHDU.from_columns(cols1)
try:
tbhdu1.header['EXPID']=(hdr['expid'],'Exposure ID')
except:
pass
# tbhdu1.header.update('EXTNAME','FIRST_ORDER_PHA_BACKGROUND','Name of this binary table extension')
tbhdu1.header['EXTNAME']=('SPECTRUM','Name of this binary table extension')
tbhdu1.header['FILETAG']=(filetag,'unique set id')
tbhdu1.header['TELESCOP']=('SWIFT ','Telescope (mission) name')
tbhdu1.header['INSTRUME']=('UVOTA ','Instrument Name')
if 'CMPOTHRS' in hdr: tbhdu1.header['CMPOTHRS']=(hdr['CMPOTHRS'],'overflow of lossy compression algorith')
if 'CMPUTHRS' in hdr: tbhdu1.header['CMPUTHRS']=(hdr['CMPUTHRS'],'underflow of lossy compression algorith')
if 'CMPCNTMN' in hdr: tbhdu1.header['CMPCNTMN']=(hdr['CMPCNTMN'],'compression losses have occurred in the image')
tbhdu1.header['CAL_REF']=('2015MNRAS.449.2514K','CDS Bibcode grism calibration')
tbhdu1.header['TIMESYS ']=(hdr['timesys'],'time system')
tbhdu1.header['MJDREFI ']=(hdr['mjdrefi'],'Reference MJD time integer part')
tbhdu1.header['MJDREFF ']=(hdr['mjdreff'],'Reference MJD fractional part')
tbhdu1.header['TIMEREF ']=(hdr['timeref'],'time reference barycentric/local')
tbhdu1.header['TASSIGN ']=(hdr['tassign'],'time assigned by clock')
tbhdu1.header['TIMEUNIT']=(hdr['timeunit'])
tbhdu1.header['TIERRELA']=(hdr['TIERRELA'],'time relative error [s/s]')
tbhdu1.header['TIERABSO']=(hdr['TIERABSO'],'timing precision in seconds')
tbhdu1.header['TSTART']=(hdr['TSTART'])
tbhdu1.header['TSTOP']=(hdr['TSTOP'])
tbhdu1.header['DATE-OBS']=(hdr['DATE-OBS'])
tbhdu1.header['DATE-END']=(hdr['DATE-END'])
tbhdu1.header['CLOCKAPP']=(hdr['CLOCKAPP'],'if clock correction was applied')
tbhdu1.header['TELAPSE']=(hdr['TELAPSE'],'Tstop - Tstart')
tbhdu1.header['EXPOSURE']=(hdr['EXPOSURE'],'Total exposure, with all known corrections')
tbhdu1.header['DEADC']=(hdr['DEADC'],'dead time correction')
tbhdu1.header['FRAMTIME']=(hdr['FRAMTIME'],'frame exposure time')
tbhdu1.header['DETNAM']=(hdr['DETNAM'])
tbhdu1.header['FILTER']=(hdr['FILTER'])
tbhdu1.header['OBS_ID']=(hdr['OBS_ID'],'observation id')
tbhdu1.header['TARG_ID']=(hdr['TARG_ID'],'Target ID')
if 'SEQ_NUM' in hdr: tbhdu1.header['SEQ_NUM']=(hdr['SEQ_NUM'])
tbhdu1.header['EQUINOX']=(hdr['EQUINOX'])
tbhdu1.header['RADECSYS']=(hdr['RADECSYS'])
tbhdu1.header['WHEELPOS']=(hdr['WHEELPOS'],'filterweel position')
tbhdu1.header['SPECTORD']=(1,'spectral order')
try:
tbhdu1.header['BLOCLOSS']=(hdr['BLOCLOSS'],'[s] Exposure time under BLOCKED filter')
tbhdu1.header['STALLOSS']=(hdr['STALLOSS'],'[s] Est DPU stalling time loss')
tbhdu1.header['TOSSLOSS']=(hdr['TOSSLOSS'],'[s] Est Shift&Toss time loss')
tbhdu1.header['MOD8CORR']=(hdr['MOD8CORR'],'Was MOD8 correction applied')
tbhdu1.header['FLATCORR']=(hdr['FLATCORR'],'LSS correction applied')
except:
pass
tbhdu1.header['ASPCORR']=('GAUSSIAN','Aspect correction method')
tbhdu1.header['HDUCLASS']=('OGIP','format attemts to follow OGIP standard')
tbhdu1.header['HDUCLAS1']=('SPECTRUM','PHA dataset (OGIP memo OGIP-92-007')
tbhdu1.header['HDUCLAS2']=('TOTAL','Gross PHA Spectrum (source + background)')
tbhdu1.header['HDUCLAS3']=('COUNT','PHA data stored as counts (not count/s)')
tbhdu1.header['HDUVERS1']=('1.1.0','Version of format (OGIP memo OGIP-92-007a)')
tbhdu1.header['CHANTYPE']=('PI','Type of channel PHA/PI')
tbhdu1.header['TLMIN1 ']=(1,'Lowest legal channel number')
tbhdu1.header['TLMAX1']=(len(channel),'Highest legal channel number')
tbhdu1.header['POISSERR']=(False,'Poissonian errors not applicable')
tbhdu1.header['GROUPING']=(0,'No grouping of the data has been defined')
tbhdu1.header['DETCHANS']=(len(channel),'Total number of detector channels available')
tbhdu1.header['AREASCAL']=(1,'Area scaling factor')
tbhdu1.header['BACKSCAL']=(1,'Background scaling factor')
tbhdu1.header['CORRSCAL']=(1,'Correlation scaling factor')
tbhdu1.header['BACKFILE']=('NONE','Background FITS file')
tbhdu1.header['CORRFILE']=('NONE ','Correlation FITS file')
tbhdu1.header['RESPFILE']=('NONE','Redistribution matrix')
tbhdu1.header['ANCRFILE']=('NONE ','Ancillary response')
tbhdu1.header['XFLT0001']=('NONE ','XSPEC selection filter description')
tbhdu1.header['CRPIX1 ']=('(1,'+str(len(channel))+')','Channel binning of the CHANNEL column')
tbhdu1.header['PHAVERSN']=('1992a ','OGIP memo number for file format')
tbhdu1.header['ZERODETX']=(zeroxy[0],'zeroth order position on image')
tbhdu1.header['ZERODETY']=(zeroxy[1],'zeroth order position on image')
hdulist.append(tbhdu1)
try:
hdulist.writeto(backfile1,clobber=clobber)
except:
print("WARNING : NO OUTPUT FILE CREATED. "+backfile1+" EXISTS and CLOBBER not set")
pass
#
# second order background PHA file
#
if back_second != None:
# create primary header
#
if chatter>4: print("uvotio: write bck 2nd order header")
hdu0 = fits.PrimaryHDU()
hdu0.header['CREATED']='written by uvotio.py '+version
hdu0.header['DATE']=str(now)
hdu0.header['AUTHOR']='NPM Kuin (UCL/MSSL)'
hdu0.header['WHEELPOS']=hdr['wheelpos']
hdu0.header['FILTER']=(hdr['filter'],'UVOT filter used')
hdu0.header['TELESCOP']=('SWIFT ','Telescope (mission) name')
hdu0.header['INSTRUME']=('UVOTA ','Instrument Name')
hdu0.header['CAL_REF']=('2015MNRAS.449.2514K','CDS Bibcode grism calibration')
hdulist=fits.HDUList([hdu0])
#
# Main fits part
#
hdr0 = hdr.copy()
hdr0['ORI_FILE']=(obsid+'+'+str(ext),'fileid and extension of extracted spectrum')
hdr0['RA_X']=(ra,'RA of source extracted spectrum')
hdr0['DEC_X']=(dec,'DEC of source extracted spectrum')
hdr0['DETX_X']=(anker[0],'XDET position source anker in DET coord')
hdr0['DETY_X']=(anker[1],'YDET position source anker in DET coord')
hdr0['POSX_AS']=(phx,'angle boresight in deg in DETX direction')
hdr0['POSY_AS']=(phy,'angle boresight in deg in DETY direction')
hdr0['SPEC_OFF']=(offset,'distance to spectrum from anker position (DETX_X,DETY_X)')
#
# first extension: first order spectrum ; add extname everywhere
#
channel = back_second[0]
bgcounts = back_second[1]
bgstat_err = back_second[2]
bgquality = back_second[3]
channel, bgcounts, bgstat_err, bgquality, aper2corr, expospec2 = back_second
col11 = fits.Column(name='CHANNEL ',format='I',array=channel )
col12 = fits.Column(name='COUNTS ',format='I',array=bgcounts ,unit='COUNTS')
col13 = fits.Column(name='STAT_ERR',format='E',array=bgstat_err,unit='COUNTS')
col14 = fits.Column(name='QUALITY ',format='I',array=bgquality )
col15 = fits.Column(name='EXPOSURE',format='E',array=expospec2 ,unit='s' )
cols1 = fits.ColDefs([col11,col12,col13,col14,col15])
tbhdu1 = fits.BinTableHDU.from_columns(cols1)
try:
tbhdu1.header['EXPID']=(hdr['expid'],'Exposure ID')
except:
pass
# tbhdu1.header.update('EXTNAME','FIRST_ORDER_PHA_BACKGROUND','Name of this binary table extension')
tbhdu1.header['EXTNAME']=('SPECTRUM','Name of this binary table extension')
tbhdu1.header['FILETAG']=(filetag,'unique set id')
tbhdu1.header['TELESCOP']=('SWIFT ','Telescope (mission) name')
tbhdu1.header['INSTRUME']=('UVOTA ','Instrument Name')
tbhdu1.header['CAL_REF']=('2015MNRAS.449.2514K','CDS Bibcode grism calibration')
tbhdu1.header['TIMESYS ']=(hdr['timesys'],'time system')
tbhdu1.header['MJDREFI ']=(hdr['mjdrefi'],'Reference MJD time integer part')
tbhdu1.header['MJDREFF ']=(hdr['mjdreff'],'Reference MJD fractional part')
tbhdu1.header['TIMEREF ']=(hdr['timeref'],'time reference barycentric/local')
tbhdu1.header['TASSIGN ']=(hdr['tassign'],'time assigned by clock')
tbhdu1.header['TIMEUNIT']=(hdr['timeunit'])
tbhdu1.header['TIERRELA']=(hdr['TIERRELA'],'time relative error [s/s]')
tbhdu1.header['TIERABSO']=(hdr['TIERABSO'],'timing precision in seconds')
tbhdu1.header['TSTART']=hdr['TSTART']
tbhdu1.header['TSTOP']=hdr['TSTOP']
tbhdu1.header['DATE-OBS']=hdr['DATE-OBS']
tbhdu1.header['DATE-END']=hdr['DATE-END']
tbhdu1.header['CLOCKAPP']=(hdr['CLOCKAPP'],'if clock correction was applied')
tbhdu1.header['TELAPSE']=(hdr['TELAPSE'],'Tstop - Tstart')
tbhdu1.header['EXPOSURE']=(hdr['EXPOSURE'],'Total exposure, with all known corrections')
tbhdu1.header['DEADC']=(hdr['DEADC'],'dead time correction')
tbhdu1.header['FRAMTIME']=(hdr['FRAMTIME'],'frame exposure time')
tbhdu1.header['DETNAM']=hdr['DETNAM']
tbhdu1.header['FILTER']=hdr['FILTER']
tbhdu1.header['OBS_ID']=(hdr['OBS_ID'],'observation id')
tbhdu1.header['TARG_ID']=(hdr['TARG_ID'],'Target ID')
#tbhdu1.header.update('SEQ_NUM',hdr['SEQ_NUM'])
tbhdu1.header['EQUINOX']=(hdr['EQUINOX'])
tbhdu1.header['RADECSYS']=(hdr['RADECSYS'])
tbhdu1.header['WHEELPOS']=(hdr['WHEELPOS'],'filterweel position')
tbhdu1.header['SPECTORD']=(2,'spectral order')
try:
tbhdu1.header['BLOCLOSS']=(hdr['BLOCLOSS'],'[s] Exposure time under BLOCKED filter')
tbhdu1.header['STALLOSS']=(hdr['STALLOSS'],'[s] Est DPU stalling time loss')
tbhdu1.header['TOSSLOSS']=(hdr['TOSSLOSS'],'[s] Est Shift&Toss time loss')
tbhdu1.header['MOD8CORR']=(hdr['MOD8CORR'],'Was MOD8 correction applied')
tbhdu1.header['FLATCORR']=(hdr['FLATCORR'],'was flat field correction applied')
except:
pass
tbhdu1.header['ASPCORR']=(hdr['ASPCORR'],'Aspect correction method')
tbhdu1.header['HDUCLASS']=('OGIP','format attemts to follow OGIP standard')
tbhdu1.header['HDUCLAS1']=('SPECTRUM','PHA dataset (OGIP memo OGIP-92-007')
tbhdu1.header['HDUCLAS2']=('TOTAL','Gross PHA Spectrum (source + background)')
tbhdu1.header['HDUCLAS3']=('COUNT','PHA data stored as counts (not count/s)')
tbhdu1.header['HDUVERS1']=('1.1.0','Version of format (OGIP memo OGIP-92-007a)')
tbhdu1.header['CHANTYPE']=('PI','Type of channel PHA/PI')
tbhdu1.header['TLMIN1 ']=(1,'Lowest legal channel number')
tbhdu1.header['TLMAX1']=(len(channel),'Highest legal channel number')
tbhdu1.header['POISSERR']=(False,'Poissonian errors not applicable')
tbhdu1.header['GROUPING']=(0,'No grouping of the data has been defined')
tbhdu1.header['DETCHANS']=(len(channel),'Total number of detector channels available')
tbhdu1.header['AREASCAL']=(1,'Area scaling factor')
tbhdu1.header['BACKSCAL']=(1,'Background scaling factor')
tbhdu1.header['CORRSCAL']=(1,'Correlation scaling factor')
tbhdu1.header['BACKFILE']=('NONE','Background FITS file')
tbhdu1.header['CORRFILE']=('NONE ','Correlation FITS file')
tbhdu1.header['RESPFILE']=('NONE','Redistribution matrix')
tbhdu1.header['ANCRFILE']=('NONE ','Ancillary response')
tbhdu1.header['XFLT0001']=('NONE ','XSPEC selection filter description')
tbhdu1.header['CRPIX1 ']=('(1,'+str(len(channel))+')','Channel binning of the CHANNEL column')
tbhdu1.header['PHAVERSN']=('1992a ','OGIP memo number for file format')
tbhdu1.header['ZERODETX']=(zeroxy[0],'zeroth order position on image')
tbhdu1.header['ZERODETY']=(zeroxy[1],'zeroth order position on image')
hdulist.append(tbhdu1)
try:
hdulist.writeto(backfile2,clobber=clobber)
except:
print("WARNING : NO OUTPUT FILE CREATED. "+backfile2+" EXISTS and CLOBBER not set")
pass
def wr_spec(ra,dec,obsid,ext,hdr,anker,phx,phy,dis,wave,sprate,bgrate,bg1rate,bg2rate,offset,ank_c,extimg, C_1,
        quality=None,history=None,chatter=1,clobber=False,interactive=False, fileout=None):
    '''Thin wrapper around OldwriteSpectrum for the straight-slit case (mode=1).

    Packs the extraction products into the tuple layout that
    OldwriteSpectrum expects and forwards all keyword options unchanged.
    The *fileout* keyword forces replacement of OBSID in the output
    file names.
    '''
    spectrum_products = (hdr, anker, phx, phy, dis, wave, sprate, bgrate,
                         bg1rate, bg2rate, offset, ank_c, extimg, C_1)
    return OldwriteSpectrum(ra, dec, obsid, ext, spectrum_products,
                            mode=1,
                            quality=quality,
                            history=history,
                            chatter=chatter,
                            clobber=clobber,
                            interactive=interactive,
                            fileout=fileout)
def OldwriteSpectrum(ra,dec,filestub,ext, Y, mode=1, quality=None,
        updateRMF=False, interactive=False, fileout=None,
        arfdbfile=None, arf2file=None, wr_outfile=True,
        history=None,chatter=1,clobber=False):
    ''' write a standard UVOT output file (superseded routine, kept for reference)

    main header: edited copy of grism det image, history?, RA DEC extracted source;
    anchor point, input angles, ank_c, angle,
    offset (distance anchor to spectrum) in arcsec
    the first extension is named SPECTRUM (future: 'FIRST_ORDER_PHA_SPECTRUM') and
    contains the standard input for XSPEC
    the second extension is named CALSPEC (future: 'FIRST_ORDER_NET_SPECTRUM')
    contains the standard input for IDL/PYTHON with
    pixelno(relative to anchor), wave(nm), net_count_rate(s-1 pix-1),
    left_background_rate(s-1 pix-1), right_background_rate (s-1 pix-1),
    flux(erg cm-2 s-1 A-1), flux_err(erg cm-2 s-1 A-1), quality flag, aper1corr
    second order: wave2, net2rate, bg2rate, flux2, flux2err, qual2, aper2corr
    The third extension named 'SPECTRUM_IMAGE' contains the image of the total spectrum
    The fourth extension may exist for summed images and contains the exposure map
    revised 2011-12-10 NPM Kuin, superseded

    Parameters
    ----------
    ra, dec : float
       sky position of the extracted source
    filestub : str
       observation id; used to build the in/output file names
    ext : int
       extension number of the exposure
    Y : tuple
       mode-dependent packed extraction products (unpacked per-mode below)
    mode : int
       0 = uvotcal output (probably obsolete); 2 = optimal extraction;
       any other value = straight slit extraction

    Returns
    -------
    flux, flux_err : ndarray
       calibrated flux and flux error [erg/s/cm2/angstrom]
    '''
    try:
        from astropy.io import fits
    except:
        import pyfits as fits
    import datetime
    import numpy as np
    from scipy import interpolate
    import os
    from pylab import polyval
    import uvotgetspec

    now = datetime.date.today()
    rnu = now.day*1.2+now.month*0.99+now.year*0.3
    version = '111020'
    h_planck = 6.626e-27      # erg/s
    lightspeed = 2.9979e10    # cm/sec
    h_c_ang = h_planck * lightspeed * 1e8   # ergs-Angstrom
    ch = chatter
    wave2 = None
    sp2netcnt = None
    bg2netcnt = None
    wave3 = None
    # NOTE(review): this clobbers the `quality` argument supplied by the
    # caller; it looks unintentional but is preserved for compatibility.
    quality = None
    # BUGFIX: present2 was only assigned in the mode==2 branch, giving a
    # NameError for modes 0 and 1 at the `if present2:` tests further down.
    present2 = False
    qflags = uvotgetspec.quality_flags()
    ################# prepare data for writing to output file
    if mode == 0:    # uvotcal output === probably obsolete
        ( (dis, spnet, angle, anker, coef_zmx, pix_zmx, wav_zmx),
          (anker_uvw1,anker_as, anker_field,ank_c),
          (bg, bg1, bg2, extimg, spimg, spnetimg, offset) ,
          (C_zero,C_1,C_2,C_3,C_min1),
          (xpix,ypix, zmxdis,zmxwav, wave,theta),
          (img, xplines, yplines, dislines, lines), hdr) = Y
        # BUGFIX: range() requires ints; ank_c[1] may be a float.
        a1 = int(ank_c[1])-370
        a2 = int(ank_c[1])+1200
        # temporary hack - need to pass actual total BG counts used for spectrum
        spwidth = 13*1.5
        if hdr['wheelpos'] > 500: spwidth = 7*1.5
        if hdr['wheelpos'] > 500: a2 = int(ank_c[1])+500
        if a2 >= len(dis): a2 = len(dis) - 1
        aa = list(range(a1,a2))
        dis = dis[aa]
        counts = (spnet+bg)[aa]
        bkg = bg[aa]
        exposure = hdr['exposure']
        wave = polyval(C_1, dis)
        NW = len(wave)
        ax = list(range(NW-1)) ; ax1 = list(range(1,NW))
        binwidth = 1.0*(wave[ax1]-wave[ax])
        sprate = spnet[aa]/exposure
        bgrate = bg[aa]*spwidth/exposure
        bg1rate = bg1[aa]*spwidth/exposure
        bg2rate = bg2[aa]*spwidth/exposure
        phx, phy = anker_field
    elif mode == 2:  # optimal extraction mode === needs to be updated Y? defs, with apercorr, second order
        (Y0,Y1,Y2,Y3,Y4) = Y
        (specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile), (method), \
            (Xphi, Yphi, date1), (dist12, ankerimg, ZOpos) = Y0
        ( (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset),
            (C_1,C_2,img), hdr,m1,m2,aa,wav1 ) = Y1
        fit, (coef0,coef1,coef2,coef3), (
            bg_zeroth,bg_first,bg_second,bg_third), (
            borderup,borderdown), apercorr,expospec = Y2
        (present0,present1,present2,present3),(q0,q1,q2,q3),(
            y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
            y1,dlim1L,dlim1U,sig1coef,sp_first ,co_first ),(
            y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
            y3,dlim3L,dlim3U,sig3coef,sp_third ,co_third ),(
            x,xstart,xend,sp_all,qquality, co_back) = fit
        opcounts, variance, borderup, borderdown, (fractions,cnts,vars,newsigmas) = Y3
        wav2p, dis2p, flux2p, qual2p, dist12p = Y4[0]
        bkg = bg_first[q1[0]]
        counts = (opcounts[1,q1[0]]+bkg)
        exposure = hdr['exposure']
        wave = polyval(C_1, x[q1[0]])
        # limit range according to wavelenghts
        qwave = np.where( (wave > 1650.0) & (wave < 6800) )
        dis = x[q1[0]][qwave]
        bkg = bkg[qwave]
        counts = counts[qwave]
        wave = wave[qwave]
        NW = len(wave)
        aa = np.arange(NW)
        ax = list(range(NW-1)) ; ax1 = list(range(1,NW))
        binwidth = 1.0*(wave[ax1]-wave[ax])
        sprate = (opcounts[1,q1[0]]/exposure)[qwave]
        bgrate = bkg/exposure
        bg1rate = (bg1[q1[0]]*2*np.polyval(sig1coef,q1[0])/exposure)[qwave]
        bg2rate = (bg2[q1[0]]*2*np.polyval(sig1coef,q1[0])/exposure)[qwave]
        phx, phy = anker_field
        # second order support was never completed (see disabled lines below)
        #wave2 = polyval(C_2, x[q2[0]]+dist12)
        #sp2netcnt = counts[2,q2[0]]
        #bg2cnt = bg_second[q2[0]]
        #sp2counts = sp2netcnt + bg2cnt
    else:            # straight slit only
        ( hdr,anker,phx,phy,dis,wave,sprate,bgrate,bg1rate,bg2rate,offset,ank_c,extimg, C_1) = Y
        a1 = int(ank_c[1])-370
        a2 = int(ank_c[1])+1200
        # temporary hack - need to pass actual total BG counts used for spectrum
        spwidth = 13*1.5
        if hdr['wheelpos'] > 500: spwidth = 7*1.5
        # BUGFIX: a2 must stay an int for range() below.
        if hdr['wheelpos'] > 500: a2 = int(ank_c[1])+500
        if a2 >= len(dis): a2 = len(dis) - 1
        aa = list(range(a1,a2))
        dis = dis[aa]
        sprate = sprate[aa]
        bgrate = bgrate[aa]*spwidth
        bg1rate = bg1rate[aa]*spwidth
        bg2rate = bg2rate[aa]*spwidth
        exposure = hdr['exposure']
        counts = ((sprate+bgrate)*exposure)
        bkg = bgrate*exposure
        wave = wave[aa]
        NW = len(wave)
        ax = list(range(NW-1)) ; ax1 = list(range(1,NW))
        binwidth = 1.0*(wave[ax1]-wave[ax])
    # ensure that offset is a scalar
    try:
        offset = offset[0]
    except:
        pass
    ############### FILES   Define the input/ output file names
    arf_file_passed = arfdbfile is not None
    obsid = filestub
    if fileout is not None:
        obsid = fileout
        if chatter > 2: print("output file name base is now:",obsid)
    if hdr['wheelpos'] == 200:
        outfile = obsid+'ugu_'+str(ext)+'.pha'
        backfile = obsid+'ugu_'+str(ext)+'_back.pha'
        respfile = obsid+'ugu_'+str(ext)+'.arf'
        if arfdbfile is None: arfdbfile = 'swugu0200_20041120v101.arf'
        rmffile = obsid+'ugu_'+str(ext)+'.rmf'
        EXTNAME = 'SPECRESPUGRISM200'
    elif hdr['wheelpos'] == 160:
        outfile = obsid+'ugu_'+str(ext)+'.pha'
        backfile = obsid+'ugu_'+str(ext)+'_back.pha'
        respfile = obsid+'ugu_'+str(ext)+'.arf'
        if arfdbfile is None: arfdbfile = 'swugu0160_20041120v101.arf'
        rmffile = obsid+'ugu_'+str(ext)+'.rmf'
        EXTNAME = 'SPECRESPUGRISM160'
    elif hdr['wheelpos'] == 1000:
        outfile = obsid+'ugv_'+str(ext)+'.pha'
        backfile = obsid+'ugv_'+str(ext)+'_back.pha'
        respfile = obsid+'ugv_'+str(ext)+'.arf'
        if arfdbfile is None: arfdbfile = 'swugv1000_20041120v101.arf'
        rmffile = obsid+'ugv_'+str(ext)+'.rmf'
        EXTNAME = 'SPECRESPVGRISM1000'
    elif hdr['wheelpos'] == 955:
        outfile = obsid+'ugv_'+str(ext)+'.pha'
        backfile = obsid+'ugv_'+str(ext)+'_back.pha'
        respfile = obsid+'ugv_'+str(ext)+'.arf'
        if arfdbfile is None: arfdbfile = 'swugv0955_20041120v101.arf'
        rmffile = obsid+'ugv_'+str(ext)+'.rmf'
        EXTNAME = 'SPECRESPVGRISM955'
    else:
        print("FATAL: exposure header does not have filterwheel position encoded")
        return
    obsid = filestub
    # test for presence of outfile and clobber not set
    if clobber == False:
        # BUGFIX: the original XOR chain `A ^ B ^ A ^ B` is identically False,
        # so the overwrite check could never trigger; use a plain `or`.
        if os.access(outfile,os.F_OK) or os.access(backfile,os.F_OK):
            print('Error: output file already present. ')
            if interactive:
                answer = input(' DO YOU WANT TO REWRITE THE OUTPUT FILES (answer yes/NO)? ')
                # BUGFIX: `answer1` was an undefined name (NameError).
                if len(answer) < 1: answer = 'NO'
                answer = answer.upper()
                if (answer == 'Y') ^ (answer == 'YES'): clobber = True
            if clobber == False: return
    filetag = obsid+'_'+str(hdr['wheelpos'])+str(rnu)
    #
    # get spectral response [cm**2] from the ARF file
    #
    # BUGFIX: results were bound to specresp1func/specresp2func but used below
    # as specrespfunc/specrespfunc2 (NameError); the names are now consistent.
    specrespfunc = SpecResp(hdr['wheelpos'], 1, )
    if present2:
        specrespfunc2 = SpecResp(hdr['wheelpos'], 2, )
    # sort out the inputs and prepare the arrays to write.
    if mode == 0:
        channel = list(range(len(aa)))
        rc = list(range(len(aa))) ; rc.reverse()
        channel = np.array(channel)
        stat_err = (np.sqrt(counts))[rc]
        counts = counts[rc]
        if quality is None:
            quality = np.zeros(len(aa))   # see definition from OGIP
            q = np.where(counts < 0)
            if q[0].shape != (0,) :
                if q[0][0] != -1:
                    stat_err[q] = 0.
                    quality[q] = 5
        else:
            quality = (quality[aa])[rc]
        cpp2fpa = 1  # count2flux(dis,wave,wheelpos,anker) count rate per pixel to flux per angstroem
        sprate = spnet[aa]/exposure
        sprate_err = np.sqrt(spnet[aa]+2*bkg)/exposure   # poisson error net rate
        dlam_per_pix = wave*0. ; dlam_per_pix[ax] = binwidth ; dlam_per_pix[-1] = binwidth[-1]
        hnu = h_c_ang/(wave)
        flux = hnu*sprate/specrespfunc(wave)/dlam_per_pix        # [erg/s/cm2/angstrom]
        flux_err = hnu*sprate_err/specrespfunc(wave)/dlam_per_pix
    else:
        channel = list(range(len(wave)))
        rc = list(range(len(wave)))
        rc.reverse()
        if present2:
            channel2 = list(range(len(wave2)))
            rc2 = list(range(len(wave2)))
            rc2.reverse()
        channel = np.array(channel)
        stat_err = (np.sqrt(counts))[rc]   # poisson error on counts (no background)
        counts = counts[rc]
        if present2:
            channel2 = np.array(channel2)
            # NOTE(review): this overwrites the first-order stat_err and reads
            # sp2counts, which is never assigned (its computation is disabled
            # in the mode==2 branch) - the second-order path appears broken.
            stat_err = (np.sqrt(sp2counts))[rc2]
            sp2counts = sp2counts[rc2]
        if quality is None:
            quality = np.zeros(len(aa))   # see definition from OGIP
            quality2 = quality
        else:
            quality = quality[rc]
            quality[np.where(quality != 0)] = 2   # see definition from OGIP
            quality2 = quality
        q = np.where(counts < 0)
        if q[0].shape != (0,) :
            if q[0][0] != -1:
                stat_err[q] = 0.
                quality[q] = qflags['bad']
        cpp2fpa = 1  # count2flux(dis,wave,wheelpos,anker) count rate per pixel to flux per angstroem
        if mode != 1:
            sprate_err = (np.sqrt(sprate+2*bgrate))/exposure    # poisson error on net counts
            dlam_per_pix = wave*0. ; dlam_per_pix[ax] = binwidth ; dlam_per_pix[-1] = binwidth[-1]
        else:
            # BUGFIX: `sp1rate_err` was never defined anywhere in this routine
            # (NameError on the default mode==1 path), and `binwidth` has one
            # element fewer than `wave` (broadcast error in the flux division).
            # Use the same poisson estimate and padded bin widths as the other
            # modes instead.
            sprate_err = (np.sqrt(sprate+2*bgrate))/exposure
            dlam_per_pix = wave*0. ; dlam_per_pix[ax] = binwidth ; dlam_per_pix[-1] = binwidth[-1]
        hnu = h_c_ang/wave
        flux = hnu*sprate/specrespfunc(wave)/dlam_per_pix        # [erg/s/cm2/angstrom]
        flux_err = hnu*sprate_err/specrespfunc(wave)/dlam_per_pix
        if present2:
            if arf2file is not None:
                # NOTE(review): sp2rate, sp2rate_err and dlam_per_pix2 are
                # never assigned in this routine; this branch is unused.
                flux2 = hnu*sp2rate/specrespfunc2(wave)/dlam_per_pix2   # [erg/s/cm2/angstrom]
                flux2_err = hnu*sp2rate_err/specrespfunc2(wave)/dlam_per_pix2
            else:
                flux2 = sp2rate*0
                flux2_err = sp2rate_err*0
            if Y4 is not None:
                if arf2file is not None:
                    flux2p = hnu*rate2p/specrespfunc2(wave2p)/dlam_per_pix2
                else:
                    flux2p = rate2p*0
    spectrum_first = (channel, counts, stat_err, quality, np.ones(len(channel)), )
    # BUGFIX: was `star_err` (undefined name) and a bare `sqrt` (not imported);
    # corrected to the statistical error array and np.sqrt.
    back_first = (channel, bkg, np.sqrt(bkg), quality, np.ones(len(channel)) )
    calspec_first = (dis,wave,sprate,bg1rate,bg2rate,flux,flux_err,quality, np.ones(len(channel)) )
    if wr_outfile:
        writeSpectrum_ (ra,dec,obsid,ext,hdr,anker,phx,phy,offset, ank_c, exposure,
            history, spectrum_first, back_first, calspec_first, extimg, outfile,
            backfile, rmffile, outfile2=None, backfile2=None, rmffile2=None, expmap=None,
            spectrum_second = None, back_second = None, calspec_second=None, present2=False,
            clobber=False, chatter=chatter )
    return flux, flux_err
def updateResponseMatrix(rmffile, C_1, clobber=True, lsffile='zemaxlsf', chatter=0):
    """ modify the response matrix lineprofiles
    using the zemax model prediction from zemaxlsf.fit

    In addition the zemax profile is broadened by the instrumental
    broadening of 2.7 pixels.

    Parameters
    ----------
    rmffile : path, str
       The rmffile is updated by default
    C_1: ndarray
       The dispersion C_1 is used to convert pixels to angstroms.
    kwargs : dict
     - *lsffile* : path
       The lsffile is in the $UVOTPY/calfiles directory
     - *clobber* : bool
       overwrite output.
     - *chatter* : int
       verbosity

    Returns
    -------
    writes RMF file

    Notes
    -----
    2011-09-02 Initial version NPMK (UCL/MSSL)
    2011-12-16 Uncorrected error: LSF is inverted in energy scale.
    2015-02-09 This routine is considered OBSOLETE now

    The same algorithm was implemented in the write_rmf_file() routine which does not
    need the input rmf file produced by the "Ftool" *rmfgen*.
    write_rmf_file was rewritten 2015-02-08
    """
    try:
        from astropy.io import fits
    except:
        import pyfits as fits
    import numpy as np
    import os
    from scipy.ndimage import convolve
    #from convolve import boxcar
    import uvotio
    import uvotgetspec
    from scipy import interpolate

    # instrumental broadening kernel: gaussian, 2.7 pix sigma in 0.54 A/pix units
    # instrument_fwhm = 2.7/0.54 # pix
    ww = uvotgetspec.singlegaussian(np.arange(-12,12),1.0,0.,2.7/0.54)
    ww = ww/ww.sum()
    # BUGFIX: the environment variable was tested *after* '/uvotpy' had been
    # appended, so the check could never trigger, and os.getenv() returning
    # None raised a TypeError on concatenation.
    UVOTPY = os.getenv('UVOTPY')
    if UVOTPY is None:
        print('The UVOTPY environment variable has not been set')
        UVOTPY = ''
    UVOTPY = UVOTPY + '/uvotpy'
    # NOTE(review): the `lsffile` parameter is ignored and immediately
    # rebound; the calibration file path is hard-coded here.
    lsffile = fits.open( UVOTPY+'/calfiles/zemaxlsf.fit' )
    tlsf = lsffile[1].data
    lsfchan = tlsf.field('channel')[0:15]   # energy value
    lsfwav = uvotio.kev2angstrom(lsfchan)
    epix = tlsf.field('epix')[0,:]
    lsfdata = tlsf.field('lsf')[:15,:]
    lsffile.close()
    hdulist = fits.open( rmffile, mode='update' )
    hdu = hdulist['MATRIX']
    matrix = hdu.data.field('MATRIX')
    n5 = len(matrix)
    k5 = len(matrix[0])
    # total response per row, used later to renormalise the new line profile
    resp = np.zeros(n5)
    for i in range(n5): resp[i] = matrix[i].sum()
    energ_lo = hdu.data.field('energ_lo')
    energ_hi = hdu.data.field('energ_hi')
    n_grp = hdu.data.field('n_grp')
    f_chan = hdu.data.field('f_chan')
    n_chan = hdu.data.field('n_chan')
    e_mid = 0.5*(energ_lo+energ_hi)
    wav = kev2angstrom( e_mid )
    wav_lo = kev2angstrom( energ_hi )
    wav_hi = kev2angstrom( energ_lo )
    dis = np.arange(-370,1150)
    C_1inv = uvotgetspec.polyinverse(C_1, dis)
    d_lo = np.polyval(C_1inv,wav_lo)
    d_hi = np.polyval(C_1inv,wav_hi)
    for k in range(len(e_mid)):
        # NOTE(review): with `|` this condition is nearly always true since
        # isfinite() is mostly True; `&` may have been intended — confirm
        # before changing (routine is obsolete, behavior preserved).
        if ((e_mid[k] != 0) | (np.isfinite(resp[k]))):
            # find index e in lsfchan and interpolate lsf
            w = wav[k]
            j = lsfwav.searchsorted(w)
            if j == 0:
                lsf = lsfdata[0,:]
            elif ((j > 0) & (j < 15) ):
                e1 = lsfchan[j-1]
                e2 = lsfchan[j]
                frac = (e_mid[k]-e1)/(e2-e1)
                lsf1 = lsfdata[j-1,:]
                lsf2 = lsfdata[j,:]
                lsf = (1-frac) * lsf1 + frac * lsf2
            else:
                # j = 15
                lsf = lsfdata[14,:]
            # convolution lsf with instrument_fwhm and multiply with response
            lsf_con = convolve(lsf,ww.copy(),)
            # assign wave to lsf array relative to w at index k in matrix (since on diagonal)
            # rescale lsfcon from pixels to channels
            d = np.arange(-79,79) + int(np.polyval(C_1inv, w))
            wave = np.polyval(C_1,d)
            ener = uvotio.angstrom2kev(wave)
            # now each pixel has a wave, energy(keV) and lsf_con value
            # new array to fill
            lsfnew = np.zeros(k5)
            ener_ = list(ener)
            lsf_con_ = list(lsf_con)
            ener_.reverse()
            lsf_con_.reverse()
            # now we have ener as an increasing function - if not, the function fails.
            inter = interpolate.interp1d(ener_, lsf_con_,bounds_error=False,fill_value=0.0)
            for i in range(k5):
                lsfnew[i] = inter( e_mid[i])
            lsfnew_norm = lsfnew.sum()
            if (np.isnan(lsfnew_norm)) | (lsfnew_norm <= 0.0): lsfnew_norm = 5.0e9
            lsfnew = ( lsfnew / lsfnew_norm) * resp[k]
            matrix[k] = lsfnew
        # BUGFIX: under Python 3, `k/48*48 == k` is true for every k (float
        # division), printing for every single row; use modulo for the
        # intended every-48-rows progress message.
        if (k % 48 == 0): print('processed up to row --> ',k)
    if chatter > 1: print("updating the LSF in the response file now")
    hdu.header['HISTORY']=('Updated LSF by uvotgetspec.uvotio.updateResponseMatrix()')
    # BUGFIX: astropy's HDUList has no update_tbhdu() method (AttributeError
    # before flush); only call it on a legacy pyfits object that provides it.
    if hasattr(hdulist, 'update_tbhdu'):
        hdulist.update_tbhdu()
    print('updated')
    hdulist.verify()
    hdulist.flush()
    hdulist.close()
def make_rmf(phafile,rmffile=None,spectral_order=1,
      lsfVersion='001',
      clobber=False,chatter=1):
    """
    Make the rmf file after writing the extracted spectrum file.

    Parameters
    ----------
    phafile : path
       name of the pha file
    rmffile : path (optional)
       name of the rmf file
       default is <phafile root>.rmf
    spectral_order : int
       1 or 2; for order 2 the RMF contains dummy values taken from
       the first order
    lsfVersion : str
       version of the LSF calibration file to use
    clobber : bool
       overwrite an existing rmf file
    chatter : int
       verbosity

    Notes
    -----
    The response file can be used with the PHA file (SPECTRUM extension)
    to analyse the spectrum using SPEX or XSPEC.
    This is inappropriate for summed spectra made using sum_PHAspectra.
    but a future revision of that routine is planned to write the
    correct extension for this process.
    """
    import numpy as np
    from astropy.io import fits
    import uvotmisc

    if rmffile is None:
        rmffile = phafile.split(".pha")[0]+".rmf"
    f = fits.open(phafile,mode='update')
    if len(f) < 4:
        # BUGFIX: close the file before raising so the handle is not leaked
        f.close()
        raise IOError("The pha input file seems not correct.\n"+
            "The input file needs to be that for order 1, even to compute the rmf for order 2\n")
    try:
        if spectral_order == 1:
            wave = f['CALSPEC'].data['lambda']
        elif spectral_order == 2:
            wave = f['CALSPEC'].data['lambda2']
            print("THE RMF for the second order are DUMMY values taken from first order")
        else:
            raise IOError("Illegal value for spectral_order")
        wheelpos = f['SPECTRUM'].header['wheelpos']
        hdr = f['SPECTRUM'].header
        disp = uvotmisc.get_dispersion_from_header(hdr, order=spectral_order)
        hist = hdr['history']
        anc = uvotmisc.get_keyword_from_history(hist,'anchor1')
        # the anchor was stored in the HISTORY cards as "( x, y )"
        anchor = np.array(anc.split('(')[1].split(')')[0].split(','),dtype=float)
        if chatter > 2:
            print("call write_rmf_file parameters are :")
            print("rmffile=",rmffile," wheelpos=", wheelpos," disp=", disp)
            print("lsfVersion=",lsfVersion," anchor=",anchor)
            print("wave : ", wave)
        write_rmf_file(rmffile,wave,wheelpos,disp,anchor=anchor,
            lsfVersion=lsfVersion,
            chatter=chatter,clobber=clobber)
        # update the phafile header with rmffile name
        f['SPECTRUM'].header['RESPFILE'] = (rmffile,"RMF file name")
        f.close()
    except Exception as e:
        # BUGFIX: was a bare `except:` (also catching KeyboardInterrupt) and
        # the original error was discarded; the exception is now chained so
        # the real cause is visible to the caller.
        print("ERROR in call write_rmf_file. Trying again ... ")
        # NOTE(review): this retry re-uses variables (wave, wheelpos, disp,
        # anchor) that may be undefined if the failure happened early;
        # preserved from the original logic.
        write_rmf_file(rmffile,wave,wheelpos,disp,anchor=anchor,
            lsfVersion=lsfVersion,
            chatter=chatter,clobber=clobber)
        f.close()
        raise RuntimeError("The file is not a correct PHA file") from e
def write_rmf_file (rmffilename, wave, wheelpos, disp,
       flux = None,
       anchor=[1000,1000],   # only that one is currently available
       spectralorder = 1,    # not possible for second order yet
       effarea1=None, effarea2=None,
       lsfVersion='001', msg="",
       chatter=1, clobber=False ):
    '''
    Write the RMF file for the first order spectrum

    Parameters
    ----------
    rmffile : path, str
       file name output file

    wave : ndarray
       mid-wavelengths of the bins in the spectrum

    flux : ndarray [default None]
       used to omit channels of invalid (NaN) or negative
       flux values from the response file

    wheelpos : int
       filter wheel position

    disp : ndarray
       dispersion coefficients

    anchor : 2-element list
       The anchor position is used to select the correct
       effective area (important for the clocked grism modes).

    spectralorder : 1
        ** Do not change **
        Only for the first order the RMF can currently be
        computed.

    effarea1, effarea2 : hdu, interpolating function
        do not use unless you know what you're doing

    lsfVersion : ['001','003']
       version number of the LSF file to be used.

    chatter : int
       verbosity

    clobber : bool
       if true overwrite output file if it already exists

    Returns
    -------
    Writes the RMF file

    Notes
    -----
    The count rate has been corrected for coincidence loss (version 2 SPECTRUM extension).
    The spectral response varies in the clocked grisms, is nearly constant in the
    nominal grisms. Therefore, in the clocked grisms, the rmf file needs
    to be created specifically for the specific spectrum.

    The rmf files have also energy bins corresponding to the wavelength
    bins in the spectrum. These also show some variation from spectrum to
    spectrum.

    The line spread function from the uv grism at default position is
    currently used for all computations. Since the RMF file encodes also
    the effective area, this version presumes given anchor position.

    2014-02-27 code cleaned up. Speed depends on number of points
    2015-02-02 versioning lsf introduced; changed instrument FWHM values
    2015-02-04 error found which affects the longer wavelengths (> 3500A)
    2015-02-04 verified the LSF with a calibration spectrum, which shows
       instrumental broadening of 10+-1 Angstrom and at long
       wavelengths (6560) the same broadening predicted by the
       Zemax optical model.
    2015-02-09 There was a major overhaul of this routine, which is now
       much improved.
    2015-02-13 remove trimming of channels
    '''
    try:
        from astropy.io import fits
    except:
        import pyfits as fits
    import numpy as np
    import os
    from scipy.ndimage import convolve
    import uvotio
    import uvotgetspec
    from scipy import interpolate
    import datetime

    version = '150208'

    if not ((lsfVersion == '001') | (lsfVersion == '002') | (lsfVersion == '003') ):
        raise IOError("please update the calfiles directory with new lsf file.")

    now = datetime.date.today()
    datestring = now.isoformat()[0:4]+now.isoformat()[5:7]+now.isoformat()[8:10]

    if chatter > 0: print("computing RMF file.")

    # telescope and image intensifier broadening (only needed for the oldest
    # LSF version; later versions fold this into the LSF table itself)
    if lsfVersion == '001':
        if wheelpos < 500:
            instrument_fwhm = 2.7 # Angstroms in units of pix
        else:
            instrument_fwhm = 5.8 # Angstroms in units of pix

    # get the effective area for the grism mode, anchor position and order at each wavelength
    if effarea1 is not None:
        if len(effarea1) == 2:
            hdu, fnorm = effarea1
            w = 0.5*(hdu.data['WAVE_MIN']+hdu.data['WAVE_MAX'])
            fnorm = fnorm(w)
        else:
            hdu = effarea1
            w = 0.5*(hdu.data['WAVE_MIN']+hdu.data['WAVE_MAX'])
            fnorm = 1
    else:
        hdu, fnorm, msg = uvotio.readFluxCalFile(wheelpos,anchor=anchor,
            spectralorder=spectralorder,msg=msg, chatter=chatter)
        w = 0.5*(hdu.data['WAVE_MIN']+hdu.data['WAVE_MAX'])
        fnorm = fnorm(w)

    # reverse so wavelength is increasing before building the interpolator
    r = hdu.data['SPECRESP']
    ii = list(range(len(w)-1,-1,-1))
    w = w[ii]
    r = r[ii]
    r = r * fnorm
    specrespfunc = interpolate.interp1d( w, r, bounds_error=False, fill_value=0.0 )

    # exclude channels with no effective area
    #wave = wave[(wave >= np.min(w)) & (wave <= np.max(w))]
    # exclude channels that have bad data
    #if flux != None:
    #    wave = wave[(np.isfinite(flux) & (flux >= 0.))]

    NN = len(wave) # number of channels in spectrum (backward since we will use energy)
    if NN < 20:
        print("write_rmf_file: not enough valid data points.\n"+\
            " No rmf file written for wheelpos=",wheelpos,", order=",spectralorder)
        return

    iNL = np.arange(0,NN,1,dtype=int)  # index energy array spectrum
    NL = len(iNL)  # number of sample channels
    aa = uvotgetspec.pix_from_wave(disp, wave, spectralorder=spectralorder)  # slow !
    tck_Cinv = interpolate.splrep(wave,aa,)  # B-spline coefficients to look up pixel position (wave)

    channel = list(range(NN))
    aarev = list(range(NN-1,-1,-1))  # reverse channel numbers (not 1-offset)
    channel = np.array(channel) + 1  # start channel numbers with 1

    # spectral response as function of energy
    resp = specrespfunc(wave[aarev])

    # wavelengths bounding a pixel
    wave_lo = np.polyval(disp,aa-0.5)  # increasing
    wave_hi = np.polyval(disp,aa+0.5)

    # corresponding energy channels
    energy_lo = uvotio.angstrom2kev(wave_hi[aarev])  # increasing energy channels (index reverse)
    energy_hi = uvotio.angstrom2kev(wave_lo[aarev])
    #energy_mid = uvotio.angstrom2kev(wave[aarev])
    e_mid = 0.5*(energy_lo+energy_hi)  # increasing energy

    # channel (integer) pixel positions (spectrum)
    d_lo = np.array(interpolate.splev(wave_lo, tck_Cinv,) + 0.5,dtype=int)
    d_hi = np.array(interpolate.splev(wave_hi, tck_Cinv,) + 0.5,dtype=int)
    d_mid = np.array(interpolate.splev(wave[aarev], tck_Cinv,) + 0.5,dtype=int)  # increasing energy

    # output arrays
    n_grp = np.ones(NN)
    f_chan = np.ones(NN)
    n_chan = np.ones(NN) * NN
    matrix = np.zeros( NN*NN, dtype=float).reshape(NN,NN)

    # (only for original version pre-2015-02-05) instrumental profile gaussian assuming first order
    # second order needs attention: instrument + LSF
    if lsfVersion == '001':
        ww = uvotgetspec.singlegaussian(np.arange(-12,12),1.0,0.,instrument_fwhm)
        ww = ww/ww.sum().flatten()   # normalised gaussian

    # get the LSF data for selected wavelengths/energies
    lsfwav,lsfepix,lsfdata,lsfener = _read_lsf_file(lsfVersion=lsfVersion,wheelpos=wheelpos,)

    # provide a spectral response for NN energies
    for k in iNL:   # increasing energy
        # find the LSF for the current energy channel
        lsf = _interpolate_lsf(e_mid[k],lsfener,lsfdata,lsfepix,)

        # convolution lsf with instrument_fwhm
        if lsfVersion == '001':
            lsf = convolve(lsf,ww.copy(),)
        # lsf should already be normalised to one

        # assign wave to lsf array relative to w at index k in matrix (since on diagonal)
        # rescale lsf from half-pixels (centre is on a boundary) to channels

        # find the range in pixels around the e_mid pixel at index k
        # BUGFIX: use floor division -- under Python 3 true division made
        # pdel a float (len(lsfepix) is 156 or 158), which shifted the
        # qrange/qpix index arithmetic away from the intended (Python 2)
        # integer semantics.
        pdel = len(lsfepix)//2//2   # one-way difference; half->whole pixels
        # where to put in matrix row :
        qrange = np.arange( np.max([k-pdel,0]), np.min([k+pdel,NN]), 1,dtype=int)
        # skipping by lsfepix twos so that we can add neighboring half-pixel LSF
        qpix = np.arange(pdel*4-2,-1,-2,dtype=int)
        matrixrow = np.zeros(NN)
        matrixrow[qrange] = lsf[qpix]+lsf[qpix+1]
        # centre is in middle of a channel
        matrix[k,:] = matrixrow*resp[k]

    # for output
    if wheelpos < 500:
        filtername = "UGRISM"
    else:
        filtername = "VGRISM"

    if chatter > 0 : print("writing RMF file")

    hdu = fits.PrimaryHDU()
    hdulist=fits.HDUList([hdu])
    hdulist[0].header['TELESCOP']=('SWIFT ','Telescope (mission) name')
    hdulist[0].header['INSTRUME']=('UVOTA ','Instrument Name')
    hdulist[0].header['COMMENT'] ="revision 2015-02-08, version 003"

    # MATRIX extension: one row per energy bin, full NN-wide response row
    col11 = fits.Column(name='ENERG_LO',format='E',array=energy_lo,unit='KeV')
    col12 = fits.Column(name='ENERG_HI',format='E',array=energy_hi,unit='KeV')
    col13 = fits.Column(name='N_GRP',format='1I',array=n_grp,unit='None')
    col14 = fits.Column(name='F_CHAN',format='1I',array=f_chan,unit='None')
    col15 = fits.Column(name='N_CHAN',format='1I',array=n_chan,unit='None' )
    col16 = fits.Column(name='MATRIX',format='PE(NN)',array=matrix,unit='cm**2' )
    cols1 = fits.ColDefs([col11,col12,col13,col14,col15,col16])
    tbhdu1 = fits.BinTableHDU.from_columns(cols1)
    tbhdu1.header['EXTNAME'] =('MATRIX','Name of this binary table extension')
    tbhdu1.header['TELESCOP']=('SWIFT','Telescope (mission) name')
    tbhdu1.header['INSTRUME']=('UVOTA','Instrument name')
    tbhdu1.header['FILTER'] =(filtername,'filter name')
    tbhdu1.header['CHANTYPE']=('PI', 'Type of channels (PHA, PI etc)')
    tbhdu1.header['HDUCLASS']=('OGIP','format conforms to OGIP standard')
    tbhdu1.header['HDUCLAS1']=('RESPONSE','RESPONSE DATA')
    tbhdu1.header['HDUCLAS2']=('RSP_MATRIX','contains response matrix')
    tbhdu1.header['HDUCLAS3']=('FULL','type of stored matrix')
    tbhdu1.header['HDUVERS'] =('1.3.0','version of the file format')
    tbhdu1.header['ORIGIN'] =('UVOTPY revision 2015-02-08','source of FITS file')
    tbhdu1.header['TLMIN4'] =( 1, 'First legal channel number')
    tbhdu1.header['TLMAX4'] =(NN, 'Last legal channel number')
    tbhdu1.header['NUMGRP'] =(NN, 'Sum of the N_GRP column')
    tbhdu1.header['NUMELT'] =(NN, 'Sum of the N_CHAN column')
    tbhdu1.header['DETCHANS']=(NN, 'Number of raw detector channels')
    tbhdu1.header['LO_THRES']=(1.0E-10, 'Minimum value in MATRIX column to apply')
    tbhdu1.header['DATE'] =(now.isoformat(), 'File creation date')
    hdulist.append(tbhdu1)

    # EBOUNDS extension: energy boundaries of every channel
    col21 = fits.Column(name='CHANNEL',format='I',array=channel,unit='channel')
    col22 = fits.Column(name='E_MIN',format='E',array=energy_lo,unit='keV')
    col23 = fits.Column(name='E_MAX',format='E',array=energy_hi,unit='keV')
    cols2 = fits.ColDefs([col21,col22,col23])
    tbhdu2 = fits.BinTableHDU.from_columns(cols2)
    tbhdu2.header['EXTNAME'] =('EBOUNDS','Name of this binary table extension')
    tbhdu2.header['TELESCOP']=('SWIFT','Telescope (mission) name')
    tbhdu2.header['INSTRUME']=('UVOTA','Instrument name')
    tbhdu2.header['FILTER'] =(filtername,'filter name')
    tbhdu2.header['CHANTYPE']=('PI', 'Type of channels (PHA, PI etc)')
    tbhdu2.header['HDUCLASS']=('OGIP','format conforms to OGIP standard')
    tbhdu2.header['HDUCLAS1']=('RESPONSE','RESPONSE DATA')
    tbhdu2.header['HDUCLAS2']=('EBOUNDS','type of stored matrix')
    tbhdu2.header['HDUVERS'] =('1.2.0','version of the file format')
    tbhdu2.header['DETCHANS']=(NN, 'Number of raw detector channels')
    tbhdu2.header['TLMIN1'] =( 1, 'First legal channel number')
    tbhdu2.header['TLMAX1'] =(NN, 'Last legal channel number')
    tbhdu2.header['DATE'] =(now.isoformat(), 'File creation date')
    hdulist.append(tbhdu2)
    # NOTE(review): astropy deprecated ``clobber`` in favour of
    # ``overwrite`` -- kept for compatibility with older astropy/pyfits.
    hdulist.writeto(rmffilename,clobber=clobber)
def _interpolate_lsf(en,lsfener,lsfdata,lsfepix,):
"""
interpolate the LSF data for a different energy
parameters
===========
en : float (not a list/array)
energy (keV) for wavelength for which LSF is desired
lsfwav : numpy array
list of wavelengths at which we have LSF
lsfepix : numpy array
for given wavelength, give LSF value for a channel
the size of each channel is 0.5 pixel
lsfdata : numpy array
2-d array. first index relates to lsfwav, second to channels
returns
=======
lsf[channels] for wavelength w
method
========
the LSF data near w are linearly interpolated for each channel
"""
import numpy as np
if not ((type(en) == float) | (type(en) == np.float32)):
print("en = ",en)
print("type en = ", type(en))
raise IOError("_interpolate_lsf only works on one *en* element at a time")
# find index of the nearest LSF
indx = np.argsort(lsfener) # indices
jj = lsfener.searchsorted(en,sorter=indx)
j = indx[jj-1]
k = lsfener.shape[0]
if j == 0:
lsf = lsfdata[0,:].flatten()
elif ((j > 0) & (j < k) ):
e1 = lsfener[j-1]
e2 = lsfener[j]
frac = (en-e1)/(e2-e1)
lsf1 = lsfdata[j-1,:].flatten()
lsf2 = lsfdata[j,:].flatten()
lsf = ((1-frac) * lsf1 + frac * lsf2)
else:
lsf = lsfdata[k-1,:].flatten()
return lsf
def _read_lsf_file(lsfVersion='003',wheelpos=160,):
    """
    Read the zemax line-spread-function calibration table for the given
    LSF version and filter-wheel position.

    Returns (lsfwav, lsfepix, lsfdata, lsfener): the LSF wavelengths,
    half-pixel channel offsets, the 2-d table of profiles, and the
    corresponding energies.

    2015-08-19 error in lsfepix order of row and column?
    """
    import os
    from astropy.io import fits
    import uvotio
    # BUGFIX: os.getenv returns None when the variable is unset, and the
    # old ``os.getenv('UVOTPY') + '/uvotpy'`` then raised a TypeError
    # before the intended IOError check could ever run.
    UVOTPY = os.getenv('UVOTPY')
    if not UVOTPY:
        raise IOError( 'The UVOTPY environment variable has not been set; aborting RMF generation ')
    UVOTPY = UVOTPY + '/uvotpy'
    if lsfVersion == '001':
        try:
            lsffile = fits.open( UVOTPY+'/calfiles/zemaxlsf0160_v001.fit' )
        except:
            print("WARNING: the oldest zemaxlsf calfile has been read in (= wheelpos 160; version 001)")
            lsffile = fits.open( UVOTPY+'/calfiles/zemaxlsf.fit' )
    else:
        # in later versions the instrumental broadening is already included in the Line Spread Function
        lsffile = fits.open( UVOTPY+'/calfiles/zemaxlsf0160_v'+lsfVersion+'.fit' )
    if wheelpos < 500:
        lsfextension = 1
    else:
        print("using the LSF model of the UV grism for the Visible grism "+\
            "until such time as the LSF model for the visible grism can be incorporated")
    lsfener = lsffile[1].data['channel'][0:15]   # energy value (keV)
    lsfepix = lsffile[1].data['epix'][0,:]       # 156 or 158 values - offset in half pixels
    lsfdata = lsffile[1].data['lsf'][:15,:]      # every half pixel a value - to resolve at shortest wavelengths
    lsfwav = uvotio.kev2angstrom(lsfener)        # LSF wavelength
    lsflen = lsfdata.shape[1]
    lsffile.close()
    return lsfwav,lsfepix,lsfdata,lsfener
| 42.048151 | 153 | 0.605488 |
ace65f4904a64e26846aeeee0e093af4415ace75 | 521 | py | Python | venv/Lib/site-packages/sklearn/mixture/bayesian_mixture.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/sklearn/mixture/bayesian_mixture.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/sklearn/mixture/bayesian_mixture.py | Jos33y/student-performance-knn | 4e965434f52dd6a1380904aa257df1edfaebb3c4 | [
"MIT"
] | null | null | null |
# THIS FILE WAS AUTOMATICALLY GENERATED BY deprecated_modules.py
import sys

from . import _bayesian_mixture
from ..externals._pep562 import Pep562
from ..utils.deprecation import _raise_dep_warning_if_not_pytest

# Old (deprecated) import path, and the path users should migrate to.
deprecated_path = 'sklearn.mixture.bayesian_mixture'
correct_import_path = 'sklearn.mixture'

_raise_dep_warning_if_not_pytest(deprecated_path, correct_import_path)


def __getattr__(name):
    # PEP 562 module-level __getattr__: forward every attribute lookup to
    # the real (underscored) implementation module.
    return getattr(_bayesian_mixture, name)


if not sys.version_info >= (3, 7):
    # Module-level __getattr__ requires Python 3.7+; on older versions the
    # Pep562 wrapper emulates the same behaviour.
    Pep562(__name__)
ace65f597d3fc2f7cfaafbee6d36f41f2d77fbe5 | 686 | py | Python | accounts/url_reset.py | Code-Institute-Submissions/ShaneMuir-milestone-5 | 705772054da5f08b102e6fe0009100a889180e64 | [
"MIT"
] | 1 | 2019-02-18T22:03:17.000Z | 2019-02-18T22:03:17.000Z | accounts/urls_reset.py | abonello/django-e-commerce | b8832eb9e663a40f3b5defd1f491164fc019523a | [
"blessing"
] | 6 | 2020-06-05T18:35:34.000Z | 2022-03-11T23:25:38.000Z | accounts/url_reset.py | ShaneMuir/BugFeature-Tracker | a93f65e48b38c9106a627f22a410cd4edbc26aa5 | [
"MIT"
] | 1 | 2019-03-01T17:03:27.000Z | 2019-03-01T17:03:27.000Z | from django.conf.urls import url
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.views import password_reset, password_reset_done, password_reset_confirm, password_reset_complete

# Password-reset flow: request form -> "e-mail sent" page -> confirmation
# link from the e-mail (encodes uidb64 + token) -> completion page.
urlpatterns = [
    url(r'^$', password_reset,
        {'post_reset_redirect': reverse_lazy('password_reset_done')}, name='password_reset'),
    url(r'^done/$', password_reset_done, name='password_reset_done'),
    url(r'^(?P<uidb64>[0-9A-Za-z]+)-(?P<token>.+)/$', password_reset_confirm,
        {'post_reset_redirect': reverse_lazy('password_reset_complete')}, name='password_reset_confirm'),
    url(r'^complete/$', password_reset_complete, name='password_reset_complete'),
]
| 52.769231 | 122 | 0.750729 |
ace6607aef43491e76cd38c46e1d03656dd422bc | 1,425 | py | Python | jp.atcoder/abc011/abc011_4/26240599.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc011/abc011_4/26240599.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc011/abc011_4/26240599.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import sys
import typing
import numba as nb
import numpy as np
# @nb.njit
# def choose_pascal(n: int) -> np.ndarray:
# c = np.zeros((n, n), np.float64)
# c[:, 0] = 1
# for i in range(1, n):
# for j in range(1, i + 1):
# c[i, j] = c[i - 1, j] + c[i - 1, j - 1] / 4
# return c
def choose_pascal(n: int) -> typing.List[typing.List[int]]:
    """Build an n x n Pascal-style table where the upper-left ancestor
    contributes with weight 1/4: c[i][j] = c[i-1][j] + c[i-1][j-1] / 4,
    i.e. C(i, j) / 4**j."""
    table = [[1] + [0] * (n - 1) for _ in range(n)]
    for row in range(1, n):
        prev, cur = table[row - 1], table[row]
        for col in range(1, row + 1):
            cur[col] = prev[col] + prev[col - 1] / 4
    return table
# @nb.njit((nb.i8, nb.i8, nb.i8, nb.i8), cache=True)
def solve(n: int, d: int, x: int, y: int) -> typing.NoReturn:
    """Print the probability that a 2-D random walk (n steps of length d,
    each of the four directions with probability 1/4) ends at (x, y)."""
    # The target must lie on the lattice of step multiples.
    if x % d or y % d:
        print(0)
        return
    x //= d
    y //= d
    # Need at least x + y net steps; the leftover steps must cancel in pairs.
    if n < x + y or (n - x - y) & 1:
        print(0)
        return
    # p[i][j] = C(i, j) / 4**j: binomial coefficient pre-scaled by the
    # per-step probability (see choose_pascal); 1 << 10 covers n <= 1000.
    p = choose_pascal(1 << 10)
    k = n - x - y  # steps left over after reaching the target
    tot = 0
    for i in range(0, k + 1, 2):
        d = i // 2        # down steps (each cancelled by an extra up step)
        u = y + d         # total up steps
        l = (k - i) // 2  # left steps (each cancelled by an extra right step)
        r = x + l         # total right steps
        # tmp = p[n, d] * p[n - d, u]
        # tmp *= p[n - d - u, l] * p[n - d - u - l, r]
        # Multinomial split of the n steps into (d, u, l, r), already
        # weighted by (1/4)**n through the scaled table.
        tmp = p[n][d] * p[n - d][u]
        tmp *= p[n - d - u][l] * p[n - d - u - l][r]
        tot += tmp
    print(tot)
def main() -> typing.NoReturn:
    # Read n (number of steps) and d (step length), then the target (x, y).
    n, d = map(int, input().split())
    x, y = map(int, input().split())
    solve(n, d, x, y)


main()
| 22.265625 | 62 | 0.404912 |
ace660fca776e5e773ed2358cc92dc275e79d1ab | 130 | py | Python | examples/jobtools/job_function.py | slouchart/pyetllib | 133df36a1628f413cd60a86e4c7eac2738844d17 | [
"MIT"
] | 2 | 2020-04-01T10:08:02.000Z | 2021-03-07T15:18:14.000Z | examples/jobtools/job_function.py | slouchart/pyetllib | 133df36a1628f413cd60a86e4c7eac2738844d17 | [
"MIT"
] | null | null | null | examples/jobtools/job_function.py | slouchart/pyetllib | 133df36a1628f413cd60a86e4c7eac2738844d17 | [
"MIT"
] | 1 | 2020-10-13T13:23:02.000Z | 2020-10-13T13:23:02.000Z | from pyetllib.jobtools import Job
def job(job_ref):
    # Minimal example job body: log a message through the supplied handle.
    job_ref.info('OK')


# NOTE(review): use_job=True presumably makes pyetllib pass the Job
# instance itself as *job_ref* -- confirm against the pyetllib docs.
job = Job(func=job, use_job=True)

Job.execute(job)
| 13 | 33 | 0.707692 |
ace6611666a35769ceeeb73c2fc89f74c21c642b | 2,705 | py | Python | stream/clients/python/setup.py | crazile-dot/bookkeeper | 2f6dbdf294aca81c777ae4c20f8f38af64702d07 | [
"Apache-2.0"
] | 1 | 2021-08-21T22:58:18.000Z | 2021-08-21T22:58:18.000Z | stream/clients/python/setup.py | crazile-dot/bookkeeper | 2f6dbdf294aca81c777ae4c20f8f38af64702d07 | [
"Apache-2.0"
] | null | null | null | stream/clients/python/setup.py | crazile-dot/bookkeeper | 2f6dbdf294aca81c777ae4c20f8f38af64702d07 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os

import setuptools

# Package metadata.

name = 'apache-bookkeeper-client'
description = 'Apache BookKeeper client library'
version = '4.12.0'
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
# 'Development Status :: 5 - Production/Stable'
release_status = 'Development Status :: 3 - Alpha'
# Runtime requirements; the 'futures' backport is only pulled in on
# Python versions that lack concurrent.futures (< 3.2).
dependencies = [
    'protobuf>=3.0.0',
    'setuptools>=34.0.0',
    'six>=1.10.0',
    'pytz',
    'futures>=3.2.0;python_version<"3.2"',
    'grpcio<1.28,>=1.8.2',
    'pymmh3>=0.0.5'
]
extras = {
}

# Setup boilerplate below this line.

package_root = os.path.abspath(os.path.dirname(__file__))

# Use the README as the long description shown on PyPI.
readme_filename = os.path.join(package_root, 'README.rst')
with io.open(readme_filename, encoding='utf-8') as readme_file:
    readme = readme_file.read()

# Only include packages under the 'bookkeeper' namespace. Do not include tests,
# benchmarks, etc.
packages = [
    package for package in setuptools.find_packages()
    if package.startswith('bookkeeper')]

# Determine which namespaces are needed.
namespaces = ['bookkeeper']

setuptools.setup(
    name=name,
    version=version,
    description=description,
    long_description=readme,
    author='Apache BookKeeper',
    author_email='dev@bookkeeper.apache.org',
    license='Apache 2.0',
    url='https://github.com/apache/bookkeeper/tree/master/stream/clients/python',
    classifiers=[
        release_status,
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Operating System :: OS Independent',
        'Topic :: Internet',
    ],
    platforms='Posix; MacOS X; Windows',
    packages=packages,
    namespace_packages=namespaces,
    install_requires=dependencies,
    extras_require=extras,
    include_package_data=True,
    zip_safe=False,
)
| 30.738636 | 81 | 0.686137 |
ace66167e98bbb3583a5e8add15e432aade66023 | 9,770 | py | Python | sandman2/service.py | amoncadap/seprosandman2 | 319807c55bc8ad8ab305fa375ef8e0f34fb47dcf | [
"Apache-2.0"
] | null | null | null | sandman2/service.py | amoncadap/seprosandman2 | 319807c55bc8ad8ab305fa375ef8e0f34fb47dcf | [
"Apache-2.0"
] | null | null | null | sandman2/service.py | amoncadap/seprosandman2 | 319807c55bc8ad8ab305fa375ef8e0f34fb47dcf | [
"Apache-2.0"
] | null | null | null | """Automatically generated REST API services from SQLAlchemy
ORM models or a database introspection."""
# Third-party imports
from flask import request, make_response
import flask
from flask.views import MethodView
from flask_cors import CORS, cross_origin
# Application imports
from sandman2.exception import NotFoundException, BadRequestException
from sandman2.model import db
from sandman2.decorators import etag, validate_fields
def add_link_headers(response, links):
    """Set the HTTP ``Link`` header on *response* from the *links* mapping.

    The 'self' entry is advertised first with ``rel=self``; every entry
    (including 'self') is then listed again with ``rel=related``.

    :param response: :class:`flask.Response` response object for links to be
                     added
    :param dict links: Dictionary of links to be added
    :rtype :class:`flask.Response` :
    """
    parts = ['<{}>; rel=self'.format(links['self'])]
    parts.extend('<{}>; rel=related'.format(url) for url in links.values())
    response.headers['Link'] = ', '.join(parts)
    return response
def jsonify(resource):
    """Return a Flask ``Response`` object containing a
    JSON representation of *resource*, with Link headers advertising the
    resource's related URLs.

    :param resource: The resource to act as the basis of the response
    """
    response = flask.jsonify(resource.to_dict())
    response = add_link_headers(response, resource.links())
    return response
def is_valid_method(model, resource=None):
    """Run the model's user-defined validator for the current HTTP method.

    Looks for a ``is_valid_<method>`` callable on *model* (e.g.
    ``is_valid_get``) and returns its result -- the error message to send
    to the client, if any. Returns ``None`` when no validator is defined.
    """
    validator = getattr(
        model, 'is_valid_{}'.format(request.method.lower()), None)
    if validator is not None:
        return validator(request, resource)
class Service(MethodView):
    """The *Service* class is a generic extension of Flask's *MethodView*,
    providing default RESTful functionality for a given ORM resource.

    Each service has an associated *__model__* attribute which represents the
    ORM resource it exposes. Services are JSON-only. HTML-based representation
    is available through the admin interface.
    """

    #: The sandman2.model.Model-derived class to expose
    __model__ = None

    #: The string used to describe the elements when a collection is
    #: returned.
    __json_collection_name__ = 'resources'

    def delete(self, resource_id):
        """Return an HTTP response object resulting from a HTTP DELETE call.

        :param resource_id: The value of the resource's primary key
        """
        resource = self._resource(resource_id)
        error_message = is_valid_method(self.__model__, resource)
        if error_message:
            raise BadRequestException(error_message)
        db.session().delete(resource)
        db.session().commit()
        return self._no_content_response()

    @etag
    def get(self, resource_id=None):
        """Return an HTTP response object resulting from an HTTP GET call.

        If *resource_id* is provided, return just the single resource.
        Otherwise, return the full collection.

        :param resource_id: The value of the resource's primary key
        """
        # '/<resource>/meta' describes the resource instead of fetching it.
        if request.path.endswith('meta'):
            return self._meta()

        if resource_id is None:
            error_message = is_valid_method(self.__model__)
            if error_message:
                raise BadRequestException(error_message)
            # '?export' turns the collection into a CSV download.
            if 'export' in request.args:
                return self._export(self._all_resources())
            return flask.jsonify({
                self.__json_collection_name__: self._all_resources()
                })
        else:
            resource = self._resource(resource_id)
            error_message = is_valid_method(self.__model__, resource)
            if error_message:
                raise BadRequestException(error_message)
            return jsonify(resource)

    def patch(self, resource_id):
        """Return an HTTP response object resulting from an HTTP PATCH call.

        :returns: ``HTTP 200`` if the resource already exists
        :returns: ``HTTP 400`` if the request is malformed
        :returns: ``HTTP 404`` if the resource is not found
        :param resource_id: The value of the resource's primary key
        """
        resource = self._resource(resource_id)
        error_message = is_valid_method(self.__model__, resource)
        if error_message:
            raise BadRequestException(error_message)
        if not request.json:
            raise BadRequestException('No JSON data received')
        resource.update(request.json)
        db.session().merge(resource)
        db.session().commit()
        return jsonify(resource)

    @validate_fields
    def post(self):
        """Return the JSON representation of a new resource created through
        an HTTP POST call.

        :returns: ``HTTP 201`` if a resource is properly created
        :returns: ``HTTP 204`` if the resource already exists
        :returns: ``HTTP 400`` if the request is malformed or missing data
        """
        # An identical resource already existing is not an error (204).
        resource = self.__model__.query.filter_by(**request.json).first()
        if resource:
            error_message = is_valid_method(self.__model__, resource)
            if error_message:
                raise BadRequestException(error_message)
            return self._no_content_response()

        resource = self.__model__(**request.json)  # pylint: disable=not-callable
        error_message = is_valid_method(self.__model__, resource)
        if error_message:
            raise BadRequestException(error_message)
        db.session().add(resource)
        db.session().commit()
        return self._created_response(resource)

    def put(self, resource_id):
        """Return the JSON representation of a new resource created or updated
        through an HTTP PUT call.

        If resource_id is not provided, it is assumed the primary key field is
        included and a totally new resource is created. Otherwise, the existing
        resource referred to by *resource_id* is updated with the provided JSON
        data. This method is idempotent.

        :returns: ``HTTP 201`` if a new resource is created
        :returns: ``HTTP 200`` if a resource is updated
        :returns: ``HTTP 400`` if the request is malformed or missing data
        """
        resource = self.__model__.query.get(resource_id)
        if resource:
            error_message = is_valid_method(self.__model__, resource)
            if error_message:
                raise BadRequestException(error_message)
            resource.update(request.json)
            db.session().merge(resource)
            db.session().commit()
            return jsonify(resource)

        resource = self.__model__(**request.json)  # pylint: disable=not-callable
        error_message = is_valid_method(self.__model__, resource)
        if error_message:
            raise BadRequestException(error_message)
        db.session().add(resource)
        db.session().commit()
        return self._created_response(resource)

    def _meta(self):
        """Return a description of this resource as reported by the
        database."""
        return flask.jsonify(self.__model__.description())

    def _resource(self, resource_id):
        """Return the ``sandman2.model.Model`` instance with the given
        *resource_id*, raising ``NotFoundException`` when it does not exist.

        :rtype: :class:`sandman2.model.Model`
        """
        resource = self.__model__.query.get(resource_id)
        if not resource:
            raise NotFoundException()
        return resource

    def _all_resources(self):
        """Return the complete collection of resources as a list of
        dictionaries.

        Supports filtering through query args: exact match on any model
        field, LIKE-matching for values starting with '%', plus the
        special keys 'sort', 'limit' and 'page'.

        :rtype: :class:`sandman2.model.Model`
        """
        queryset = self.__model__.query
        # 'page' and 'export' are control parameters, not field filters.
        args = {k: v for (k, v) in request.args.items() if k not in ('page', 'export')}
        if args:
            filters = []
            order = []
            limit = None
            for key, value in args.items():
                if value.startswith('%'):
                    # SQL LIKE pattern; '/' escapes literal wildcards.
                    filters.append(getattr(self.__model__, key).like(str(value), escape='/'))
                elif key == 'sort':
                    order.append(getattr(self.__model__, value))
                elif key == 'limit':
                    limit = value
                elif hasattr(self.__model__, key):
                    filters.append(getattr(self.__model__, key) == value)
                else:
                    raise BadRequestException('Invalid field [{}]'.format(key))
            queryset = queryset.filter(*filters).order_by(*order).limit(limit)
        if 'page' in request.args:
            resources = queryset.paginate(int(request.args['page'])).items
        else:
            resources = queryset.all()
        return [r.to_dict() for r in resources]

    def _export(self, collection):
        """Return a CSV of the resources in *collection*.

        NOTE(review): values are joined naively, without CSV quoting --
        fields containing commas or newlines would corrupt the output.

        :param list collection: A list of resources represented by dicts
        """
        fieldnames = collection[0].keys()
        faux_csv = ','.join(fieldnames) + '\r\n'
        for resource in collection:
            faux_csv += ','.join((str(x) for x in resource.values())) + '\r\n'
        response = make_response(faux_csv)
        response.mimetype = 'text/csv'
        return response

    @staticmethod
    def _no_content_response():
        """Return an HTTP 204 "No Content" response.

        :returns: HTTP Response
        """
        response = make_response()
        response.status_code = 204
        return response

    @staticmethod
    def _created_response(resource):
        """Return an HTTP 201 "Created" response.

        :returns: HTTP Response
        """
        response = jsonify(resource)
        response.status_code = 201
        return response
| 36.729323 | 93 | 0.636643 |
ace661e77a3a2229ee0f91f4e0efb7294bbea9c5 | 26,863 | py | Python | sigal/gallery.py | matze/sigal | c083b84ae66e42abe23bc4abbeb64d651299e20a | [
"MIT"
] | null | null | null | sigal/gallery.py | matze/sigal | c083b84ae66e42abe23bc4abbeb64d651299e20a | [
"MIT"
] | null | null | null | sigal/gallery.py | matze/sigal | c083b84ae66e42abe23bc4abbeb64d651299e20a | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
# Copyright (c) 2009-2016 - Simon Conseil
# Copyright (c) 2013 - Christophe-Marie Duquesne
# Copyright (c) 2014 - Jonas Kaufmann
# Copyright (c) 2015 - François D.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import absolute_import, print_function
import fnmatch
import logging
import multiprocessing
import os
import random
import sys
import zipfile
from click import progressbar, get_terminal_size
from collections import defaultdict
from datetime import datetime
from itertools import cycle
from os.path import isfile, join, splitext
from . import image, video, signals
from .compat import PY2, UnicodeMixin, strxfrm, url_quote, text_type, pickle
from .image import process_image, get_exif_tags, get_exif_data, get_size
from .settings import get_thumb
from .utils import (Devnull, copy, check_or_create_dir, url_from_path,
read_markdown, cached_property, is_valid_html5_video,
get_mime)
from .video import process_video
from .writer import Writer
class Media(UnicodeMixin):
    """Base Class for media files.

    Attributes:

    - ``type``: ``"image"`` or ``"video"``.
    - ``filename``: Filename of the resized image.
    - ``thumbnail``: Location of the corresponding thumbnail image.
    - ``big``: If not None, location of the unmodified image.
    - ``exif``: If not None contains a dict with the most common tags. For more
      information, see :ref:`simple-exif-data`.
    - ``raw_exif``: If not ``None``, it contains the raw EXIF tags.
    """

    # Subclasses override these: the media kind and its recognised extensions.
    type = ''
    extensions = ()

    def __init__(self, filename, path, settings):
        # ``src_filename`` keeps the on-disk name; ``filename``/``url`` may be
        # rewritten by subclasses (Video changes the extension on conversion).
        self.src_filename = self.filename = self.url = filename
        self.path = path
        self.settings = settings
        self.ext = os.path.splitext(filename)[1].lower()
        self.src_path = join(settings['source'], path, filename)
        self.dst_path = join(settings['destination'], path, filename)
        self.thumb_name = get_thumb(self.settings, self.filename)
        self.thumb_path = join(settings['destination'], path, self.thumb_name)
        self.logger = logging.getLogger(__name__)
        self._get_metadata()
        # Let plugins react to (and possibly mutate) the freshly built media.
        signals.media_initialized.send(self)

    def __repr__(self):
        return "<%s>(%r)" % (self.__class__.__name__, str(self))

    def __unicode__(self):
        return join(self.path, self.filename)

    @property
    def big(self):
        """Path to the original image, if ``keep_orig`` is set (relative to the
        album directory). Copy the file if needed.
        """
        if self.settings['keep_orig']:
            s = self.settings
            if s['use_orig']:
                # The image *is* the original, just use it
                return self.filename
            orig_path = join(s['destination'], self.path, s['orig_dir'])
            check_or_create_dir(orig_path)
            big_path = join(orig_path, self.src_filename)
            if not isfile(big_path):
                copy(self.src_path, big_path,
                     symlink=s['orig_link'])
            return url_from_path(join(s['orig_dir'], self.src_filename))

    @property
    def thumbnail(self):
        """Path to the thumbnail image (relative to the album directory)."""
        if not isfile(self.thumb_path):
            # if thumbnail is missing (if settings['make_thumbs'] is False)
            if self.type == 'image':
                generator = image.generate_thumbnail
            elif self.type == 'video':
                generator = video.generate_thumbnail
            self.logger.debug('Generating thumbnail for %r', self)
            # Prefer the processed output; fall back to the source file.
            path = (self.dst_path if os.path.exists(self.dst_path)
                    else self.src_path)
            try:
                generator(path, self.thumb_path, self.settings['thumb_size'],
                          self.settings['thumb_video_delay'],
                          fit=self.settings['thumb_fit'])
            except Exception as e:
                # Thumbnail generation failure is non-fatal; caller gets None.
                self.logger.error('Failed to generate thumbnail: %s', e)
                return
        return url_from_path(self.thumb_name)

    def _get_metadata(self):
        """ Get image metadata from filename.md: title, description, meta."""
        self.description = ''
        self.meta = {}
        self.title = ''
        descfile = splitext(self.src_path)[0] + '.md'
        if isfile(descfile):
            meta = read_markdown(descfile)
            # Promote each markdown meta key to an attribute on this media.
            for key, val in meta.items():
                setattr(self, key, val)

    def _get_file_date(self):
        # Fallback date when no EXIF date is available: the file's mtime.
        stat = os.stat(self.src_path)
        return datetime.fromtimestamp(stat.st_mtime)
class Image(Media):
    """Gather all informations on an image file."""
    type = 'image'
    extensions = ('.jpg', '.jpeg', '.png', '.gif')

    @cached_property
    def date(self):
        # Prefer the EXIF capture date; fall back to the file mtime.
        return self.exif and self.exif.get('dateobj', None) or self._get_file_date()

    @cached_property
    def exif(self):
        # Simplified EXIF tags (JPEG only); None for other formats.
        return (get_exif_tags(self.raw_exif)
                if self.raw_exif and self.ext in ('.jpg', '.jpeg') else None)

    @cached_property
    def raw_exif(self):
        try:
            return (get_exif_data(self.src_path)
                    if self.ext in ('.jpg', '.jpeg') else None)
        except Exception:
            # Corrupt/unreadable EXIF: log and behave as if there were none
            # (implicitly returns None).
            self.logger.warning(u'Could not read EXIF data from %s',
                                self.src_path)

    @cached_property
    def size(self):
        # Size info of the processed image (used elsewhere as a
        # 'width'/'height' mapping).
        return get_size(self.dst_path)

    @cached_property
    def thumb_size(self):
        return get_size(self.thumb_path)

    def has_location(self):
        """Return True when the parsed EXIF data contains GPS coordinates."""
        return self.exif is not None and 'gps' in self.exif
class Video(Media):
    """Gather all informations on a video file."""
    type = 'video'
    extensions = ('.mov', '.avi', '.mp4', '.webm', '.ogv', '.3gp')

    def __init__(self, filename, path, settings):
        super(Video, self).__init__(filename, path, settings)
        base, ext = splitext(filename)
        self.src_filename = filename
        # Videos carry no EXIF date here; use the file modification time.
        self.date = self._get_file_date()
        if not settings['use_orig'] or not is_valid_html5_video(ext):
            # The video will be converted: point filename/url/dst_path at the
            # target container format instead of the source file.
            video_format = settings['video_format']
            ext = '.' + video_format
            self.filename = self.url = base + ext
            self.mime = get_mime(ext)
            self.dst_path = join(settings['destination'], path, base + ext)
        else:
            # Original is already a valid HTML5 video and is used as-is.
            self.mime = get_mime(ext)
class Album(UnicodeMixin):
    """Gather all informations on an album.

    Attributes:

    :var description_file: Name of the Markdown file which gives information
        on an album
    :ivar index_url: URL to the index page.
    :ivar output_file: Name of the output HTML file
    :ivar meta: Meta data from the Markdown file.
    :ivar description: description from the Markdown file.

    For details how to annotate your albums with meta data, see
    :doc:`album_information`.
    """

    description_file = "index.md"

    def __init__(self, path, settings, dirnames, filenames, gallery):
        self.path = path
        self.name = path.split(os.path.sep)[-1]
        self.gallery = gallery
        self.settings = settings
        self.subdirs = dirnames
        self.output_file = settings['output_filename']
        self._thumbnail = None

        if path == '.':
            # Root album: maps directly to source/destination directories.
            self.src_path = settings['source']
            self.dst_path = settings['destination']
        else:
            self.src_path = join(settings['source'], path)
            self.dst_path = join(settings['destination'], path)

        self.logger = logging.getLogger(__name__)
        self._get_metadata()

        # optionally add index.html to the URLs
        self.url_ext = self.output_file if settings['index_in_url'] else ''

        self.index_url = url_from_path(os.path.relpath(
            settings['destination'], self.dst_path)) + '/' + self.url_ext

        #: List of all medias in the album (:class:`~sigal.gallery.Image` and
        #: :class:`~sigal.gallery.Video`).
        self.medias = medias = []
        self.medias_count = defaultdict(int)

        for f in filenames:
            ext = splitext(f)[1]
            if ext.lower() in Image.extensions:
                media = Image(f, self.path, settings)
            elif ext.lower() in Video.extensions:
                media = Video(f, self.path, settings)
            else:
                # Unsupported file type: skip it silently.
                continue
            self.medias_count[media.type] += 1
            medias.append(media)

        signals.album_initialized.send(self)

    def __repr__(self):
        return "<%s>(path=%r, title=%r)" % (self.__class__.__name__, self.path,
                                            self.title)

    def __unicode__(self):
        return (u"{} : ".format(self.path) +
                ', '.join("{} {}s".format(count, _type)
                          for _type, count in self.medias_count.items()))

    def __len__(self):
        return len(self.medias)

    def __iter__(self):
        return iter(self.medias)

    def _get_metadata(self):
        """Get album metadata from `description_file` (`index.md`):

        -> title, thumbnail image, description
        """
        descfile = join(self.src_path, self.description_file)
        self.description = ''
        self.meta = {}
        # default: get title from directory name
        self.title = os.path.basename(self.path if self.path != '.'
                                      else self.src_path)

        if isfile(descfile):
            meta = read_markdown(descfile)
            for key, val in meta.items():
                setattr(self, key, val)

        try:
            self.author = self.meta['author'][0]
        except KeyError:
            # No per-album author: fall back to the global setting (may be
            # None).
            self.author = self.settings.get('author')

    def create_output_directories(self):
        """Create output directories for thumbnails and original images."""
        check_or_create_dir(self.dst_path)

        if self.medias:
            check_or_create_dir(join(self.dst_path,
                                     self.settings['thumb_dir']))

        if self.medias and self.settings['keep_orig']:
            self.orig_path = join(self.dst_path, self.settings['orig_dir'])
            check_or_create_dir(self.orig_path)

    def sort_subdirs(self, albums_sort_attr):
        """Sort sub-album names in place by the configured attribute."""
        if self.subdirs:
            if albums_sort_attr:
                root_path = self.path if self.path != '.' else ''
                if albums_sort_attr.startswith("meta."):
                    # Sort by a markdown metadata key of each sub-album.
                    meta_key = albums_sort_attr.split(".", 1)[1]
                    key = lambda s: strxfrm(
                        self.gallery.albums[join(root_path, s)].meta.get(meta_key, [''])[0])
                else:
                    key = lambda s: strxfrm(getattr(
                        self.gallery.albums[join(root_path, s)], albums_sort_attr))
            else:
                # No sort attribute configured: locale-aware sort by name.
                key = strxfrm

            self.subdirs.sort(key=key,
                              reverse=self.settings['albums_sort_reverse'])

        signals.albums_sorted.send(self)

    def sort_medias(self, medias_sort_attr):
        """Sort the album's media list in place by the configured attribute."""
        if self.medias:
            if medias_sort_attr == 'date':
                # Media without a date sort as "now" (i.e. last, ascending).
                key = lambda s: s.date or datetime.now()
            elif medias_sort_attr.startswith('meta.'):
                meta_key = medias_sort_attr.split(".", 1)[1]
                key = lambda s: strxfrm(s.meta.get(meta_key, [''])[0])
            else:
                key = lambda s: strxfrm(getattr(s, medias_sort_attr))

            self.medias.sort(key=key,
                             reverse=self.settings['medias_sort_reverse'])

        signals.medias_sorted.send(self)

    @property
    def images(self):
        """List of images (:class:`~sigal.gallery.Image`)."""
        for media in self.medias:
            if media.type == 'image':
                yield media

    @property
    def videos(self):
        """List of videos (:class:`~sigal.gallery.Video`)."""
        for media in self.medias:
            if media.type == 'video':
                yield media

    @property
    def albums(self):
        """List of :class:`~sigal.gallery.Album` objects for each
        sub-directory.
        """
        root_path = self.path if self.path != '.' else ''
        return [self.gallery.albums[join(root_path, path)]
                for path in self.subdirs]

    @property
    def url(self):
        """URL of the album, relative to its parent."""
        url = self.name.encode('utf-8')
        return url_quote(url) + '/' + self.url_ext

    @property
    def thumbnail(self):
        """Path to the thumbnail of the album.

        Resolution order: cached value, thumbnail named in index.md, first
        landscape image, first media with a thumbnail, then a sub-album's
        thumbnail. Returns None when nothing usable is found.
        """
        if self._thumbnail:
            # stop if it is already set
            return url_from_path(self._thumbnail)

        # Test the thumbnail from the Markdown file.
        thumbnail = self.meta.get('thumbnail', [''])[0]

        if thumbnail and isfile(join(self.src_path, thumbnail)):
            self._thumbnail = join(self.name, get_thumb(self.settings,
                                                        thumbnail))
            self.logger.debug("Thumbnail for %r : %s", self, self._thumbnail)
            return url_from_path(self._thumbnail)
        else:
            # find and return the first landscape image
            for f in self.medias:
                ext = splitext(f.filename)[1]
                if ext.lower() in Image.extensions:
                    # Use f.size if available as it is quicker (in cache), but
                    # fallback to the size of src_path if dst_path is missing
                    size = f.size
                    if size is None:
                        size = get_size(f.src_path)

                    if size['width'] > size['height']:
                        self._thumbnail = join(self.name, f.thumbnail)
                        self.logger.debug(
                            "Use 1st landscape image as thumbnail for %r :"
                            " %s", self, self._thumbnail)
                        return url_from_path(self._thumbnail)

            # else simply return the 1st media file
            if not self._thumbnail and self.medias:
                for media in self.medias:
                    if media.thumbnail is not None:
                        self._thumbnail = join(self.name, media.thumbnail)
                        break
                else:
                    # No media produced a usable thumbnail (for/else).
                    self.logger.warning("No thumbnail found for %r", self)
                    return None

                self.logger.debug("Use the 1st image as thumbnail for %r : %s",
                                  self, self._thumbnail)
                return url_from_path(self._thumbnail)

            # use the thumbnail of their sub-directories
            if not self._thumbnail:
                for path, album in self.gallery.get_albums(self.path):
                    if album.thumbnail:
                        self._thumbnail = join(self.name, album.thumbnail)
                        self.logger.debug(
                            "Using thumbnail from sub-directory for %r : %s",
                            self, self._thumbnail)
                        return url_from_path(self._thumbnail)

        self.logger.error('Thumbnail not found for %r', self)
        return None

    @property
    def random_thumbnail(self):
        try:
            return url_from_path(join(self.name, random.choice(self.medias).thumbnail))
        except IndexError:
            # Album has no media at all: fall back to the regular resolution.
            return self.thumbnail

    @property
    def breadcrumb(self):
        """List of ``(url, title)`` tuples defining the current breadcrumb
        path.
        """
        if self.path == '.':
            return []

        path = self.path
        breadcrumb = [((self.url_ext or '.'), self.title)]

        # Walk upwards until the root album, collecting ancestor links.
        while True:
            path = os.path.normpath(os.path.join(path, '..'))
            if path == '.':
                break

            url = (url_from_path(os.path.relpath(path, self.path)) + '/' +
                   self.url_ext)
            breadcrumb.append((url, self.gallery.albums[path].title))

        breadcrumb.reverse()
        return breadcrumb

    @property
    def show_map(self):
        """Check if we have at least one photo with GPS location in the album
        """
        return any(image.has_location() for image in self.images)

    @cached_property
    def zip(self):
        """Make a ZIP archive with all media files and return its path.

        If the ``zip_gallery`` setting is set,it contains the location of a zip
        archive with all original images of the corresponding directory.
        """
        zip_gallery = self.settings['zip_gallery']

        if zip_gallery and len(self) > 0:
            zip_gallery = zip_gallery.format(album=self)
            archive_path = join(self.dst_path, zip_gallery)
            if self.settings.get('zip_skip_if_exists', False) and isfile(archive_path):
                self.logger.debug("Archive %s already created, passing", archive_path)
                return zip_gallery

            archive = zipfile.ZipFile(archive_path, 'w', allowZip64=True)
            # 'orig' zips the source files, anything else zips the processed
            # output.
            attr = ('src_path' if self.settings['zip_media_format'] == 'orig'
                    else 'dst_path')

            for p in self:
                path = getattr(p, attr)
                try:
                    # Store files flat (basename only) inside the archive.
                    archive.write(path, os.path.split(path)[1])
                except OSError as e:
                    self.logger.warn('Failed to add %s to the ZIP: %s', p, e)

            archive.close()
            self.logger.debug('Created ZIP archive %s', archive_path)
            return zip_gallery
class Gallery(object):
    """Scan the source tree, build the album structure and process all media.

    ``self.albums`` maps each album's path (relative to the source directory)
    to its :class:`Album` instance.
    """

    def __init__(self, settings, ncpu=None):
        self.settings = settings
        self.logger = logging.getLogger(__name__)
        self.stats = defaultdict(int)
        self.init_pool(ncpu)
        check_or_create_dir(settings['destination'])

        # Build the list of directories with images
        albums = self.albums = {}
        src_path = self.settings['source']

        ignore_dirs = settings['ignore_directories']
        ignore_files = settings['ignore_files']

        progressChars = cycle(["/", "-", "\\", "|"])
        show_progress = (self.logger.getEffectiveLevel() >= logging.WARNING and
                         os.isatty(sys.stdout.fileno()))
        self.progressbar_target = None if show_progress else Devnull()

        for path, dirs, files in os.walk(src_path, followlinks=True,
                                         topdown=False):
            if show_progress:
                print("\rCollecting albums " + next(progressChars), end="")
            relpath = os.path.relpath(path, src_path)

            # Test if the directory match the ignore_dirs settings
            if ignore_dirs and any(fnmatch.fnmatch(relpath, ignore)
                                   for ignore in ignore_dirs):
                self.logger.info('Ignoring %s', relpath)
                continue

            # Remove files that match the ignore_files settings
            if ignore_files:
                files_path = {join(relpath, f) for f in files}
                for ignore in ignore_files:
                    files_path -= set(fnmatch.filter(files_path, ignore))

                self.logger.debug('Files before filtering: %r', files)
                files = [os.path.split(f)[1] for f in files_path]
                self.logger.debug('Files after filtering: %r', files)

            # Remove sub-directories that have been ignored in a previous
            # iteration (as topdown=False, sub-directories are processed
            # before their parent).
            for d in dirs[:]:
                path = join(relpath, d) if relpath != '.' else d
                if path not in albums:
                    dirs.remove(d)

            album = Album(relpath, settings, dirs, files, self)

            if not album.medias and not album.albums:
                self.logger.info('Skip empty album: %r', album)
            else:
                album.create_output_directories()
                albums[relpath] = album

        with progressbar(albums.values(), label="Sorting albums",
                         file=self.progressbar_target) as progress_albums:
            for album in progress_albums:
                album.sort_subdirs(settings['albums_sort_attr'])

        with progressbar(albums.values(), label="Sorting media",
                         file=self.progressbar_target) as progress_albums:
            for album in progress_albums:
                album.sort_medias(settings['medias_sort_attr'])

        self.logger.debug('Albums:\n%r', albums.values())
        signals.gallery_initialized.send(self)

    @property
    def title(self):
        """Title of the gallery."""
        return self.settings['title'] or self.albums['.'].title

    def init_pool(self, ncpu):
        """Create ``self.pool`` (multiprocessing.Pool) when ncpu > 1, else None.

        :param ncpu: Desired worker count; None means "use all CPUs". A
            non-integer value is logged and falls back to the CPU count.
        """
        try:
            cpu_count = multiprocessing.cpu_count()
        except NotImplementedError:
            cpu_count = 1

        if ncpu is None:
            ncpu = cpu_count
        else:
            try:
                ncpu = int(ncpu)
            except ValueError:
                self.logger.error('ncpu should be an integer value')
                ncpu = cpu_count

        self.logger.info("Using %s cores", ncpu)
        if ncpu > 1:
            self.pool = multiprocessing.Pool(processes=ncpu)
        else:
            self.pool = None

    def get_albums(self, path):
        """Return the list of all sub-directories of path.

        Yields ``(subdir_path, album)`` pairs for every descendant album,
        depth-first.
        """
        for name in self.albums[path].subdirs:
            subdir = os.path.normpath(join(path, name))
            yield subdir, self.albums[subdir]
            for subname, album in self.get_albums(subdir):
                # Bug fix: yield the album belonging to *subname* (what the
                # recursive call produced), not the direct child's album.
                yield subname, album

    def build(self, force=False):
        "Create the image gallery"
        if not self.albums:
            self.logger.warning("No albums found.")
            return

        def log_func(x):
            # 63 is the total length of progressbar, label, percentage, etc
            available_length = get_terminal_size()[0] - 64
            if x and available_length > 10:
                text = text_type(x.name)[:available_length]
                if PY2:
                    text = text.encode('utf-8')
                return text
            else:
                return ""

        try:
            with progressbar(self.albums.values(), label="Collecting files",
                             item_show_func=log_func, show_eta=False,
                             file=self.progressbar_target) as albums:
                media_list = [f for album in albums
                              for f in self.process_dir(album, force=force)]
        except KeyboardInterrupt:
            sys.exit('Interrupted')

        bar_opt = {'label': "Processing files",
                   'show_pos': True,
                   'file': self.progressbar_target}
        failed_files = []

        if self.pool:
            try:
                with progressbar(length=len(media_list), **bar_opt) as bar:
                    for res in self.pool.imap_unordered(worker, media_list):
                        if res:
                            failed_files.append(res)
                        next(bar)
                self.pool.close()
                self.pool.join()
            except KeyboardInterrupt:
                self.pool.terminate()
                sys.exit('Interrupted')
            except pickle.PicklingError:
                self.logger.critical(
                    "Failed to process files with the multiprocessing feature."
                    " This can be caused by some module import or object "
                    "defined in the settings file, which can't be serialized.",
                    exc_info=True)
                sys.exit('Abort')
        else:
            # Sequential fallback when no pool was created.
            with progressbar(media_list, **bar_opt) as medias:
                for media_item in medias:
                    res = process_file(media_item)
                    if res:
                        failed_files.append(res)

        if failed_files:
            self.remove_files(failed_files)

        print('')
        if self.settings['write_html']:
            writer = Writer(self.settings, index_title=self.title)
            for album in self.albums.values():
                writer.write(album)
        signals.gallery_build.send(self)

    def remove_files(self, files):
        """Drop failed media (``(path, filename)`` pairs) from their albums."""
        self.logger.error('Some files have failed to be processed:')
        for path, filename in files:
            self.logger.error(' - %s/%s', path, filename)
            album = self.albums[path]
            for f in album.medias:
                if f.filename == filename:
                    self.stats[f.type + '_failed'] += 1
                    album.medias.remove(f)
                    break
        self.logger.error('You can run sigal in verbose (--verbose) or debug '
                          '(--debug) mode to get more details.')

    def process_dir(self, album, force=False):
        """Process a list of images in a directory.

        Yields argument tuples for :func:`process_file`, skipping media whose
        output already exists unless *force* is set.
        """
        for f in album:
            if isfile(f.dst_path) and not force:
                self.logger.info("%s exists - skipping", f.filename)
                self.stats[f.type + '_skipped'] += 1
            else:
                self.stats[f.type] += 1
                yield (f.type, f.path, f.filename, f.src_path, album.dst_path,
                       self.settings)
def process_file(args):
    """Process one media item.

    *args* is a ``(ftype, path, filename, src_path, dst_path, settings)``
    tuple. Returns ``(path, filename)`` when processing failed, so the parent
    process can report it, or ``None`` on success.
    """
    ftype, path, filename = args[0], args[1], args[2]
    processor = process_video if ftype != 'image' else process_image
    failed = processor(*args[3:])
    return (path, filename) if failed else None
def worker(args):
    """Pool entry point: run process_file, swallowing Ctrl-C so the parent
    process handles the interrupt itself."""
    try:
        return process_file(args)
    except KeyboardInterrupt:
        return None
| 36.849108 | 92 | 0.571418 |
ace661ebb1217a0e92b69bbc9d7e573bd3961bbf | 2,669 | py | Python | main7.py | mitliagkas/pyliakmon | 28a22be63646faf39f0f96a779a4579f69bfb41a | [
"MIT"
] | 3 | 2016-05-27T13:46:53.000Z | 2017-04-16T22:43:42.000Z | main7.py | mitliagkas/pyliakmon | 28a22be63646faf39f0f96a779a4579f69bfb41a | [
"MIT"
] | null | null | null | main7.py | mitliagkas/pyliakmon | 28a22be63646faf39f0f96a779a4579f69bfb41a | [
"MIT"
] | null | null | null | import numpy as np
from numpy import linalg as la
import time
import subprocess
from streaming import *
import cms
from multiprocessing import Process
from multiprocessing import Array
import sys
def runBOI(k,id,sharedQ,sharedR,allT,nWorkers,doneWithBlock,cond,sharedQ2,sharedR2,allT2,doneWithBlock2,cond2):
#sys.stderr = open("logs/Worker"+str(id)+".out", "a")
# Get whitening matrix
boi=ParallelTopicWhiten(
id=id,
sharedQ=sharedQ,
sharedR=sharedR,
allT=allT,
doneWithBlock=doneWithBlock,
cond=cond,
k=k,
stream=cms.PatientStream(ds=id, maxds=nWorkers)
)
for x in boi:
continue
Q=boi.getEstimate()
print Q.T[:,0:3]
R=np.frombuffer(boi.sharedR.get_obj()).reshape((k,k))
print R
W=np.copy(np.dot(Q,np.linalg.cholesky(np.diag(np.diag(R)))))
# Do tensor stuff
stream=cms.PatientStream(ds=id, maxds=nWorkers)
stream.p=k
boi=ParallelTopic(
W=W,
id=id,
sharedQ=sharedQ2,
sharedR=sharedR2,
allT=allT2,
doneWithBlock=doneWithBlock2,
cond=cond2,
k=k,
stream=stream
)
for x in boi:
continue
Q=boi.getEstimate()
print Q.T[:,0:3]
R=np.frombuffer(boi.sharedR.get_obj()).reshape((k,k))
print R
if id==1:
print "Saving results to disk"
np.savetxt('cmsQTopic.txt',Q)
np.savetxt('cmsRTopic.txt',R)
np.savetxt('cmsCompTopic.txt',np.dot(Q,np.linalg.cholesky(R)))
return
if __name__ == "__main__":
    t0 = time.time()

    # Problem size: p ambient dimension, k components, nWorkers processes.
    p=260
    k=3
    nWorkers=2

    # Shared state for the whitening pass (phase 1).
    # NOTE(review): ``Condition`` is not imported explicitly here; it appears
    # to come in via ``from streaming import *`` -- verify.
    sharedQ = Array('d', p*k)
    sharedR = Array('d', k*k)
    allT = Array('I', nWorkers,lock=False)
    doneWithBlock = Array('I', nWorkers,lock=False)
    cond = Condition()

    # Shared state for the tensor pass (phase 2).
    sharedQ2 = Array('d', k*k)
    sharedR2 = Array('d', k*k)
    allT2 = Array('I', nWorkers,lock=False)
    doneWithBlock2 = Array('I', nWorkers,lock=False)
    cond2 = Condition()

    # Spawn one worker process per data shard (ids are 1-based).
    processes=[]
    for id in xrange(1,nWorkers+1):
        arg={'id':id,'k':k,'nWorkers':nWorkers,
        'sharedQ':sharedQ,'sharedR':sharedR,'allT':allT,'doneWithBlock':doneWithBlock,'cond':cond,
        'sharedQ2':sharedQ2,'sharedR2':sharedR2,'allT2':allT2,'doneWithBlock2':doneWithBlock2,'cond2':cond2
        }
        processes += [Process(target=runBOI, kwargs=arg)]
        processes[-1].start()

    # Join them
    for id in xrange(1,nWorkers+1):
        processes[id-1].join()

    t1 = time.time()
    total = t1-t0
    print "Total time: ", total
| 25.419048 | 116 | 0.585238 |
ace6634f9585a8fd55f7ea94177b3c9f45eeaeb4 | 3,906 | pyp | Python | plugins/py-xample_loader_r15/py-xample_loader_r15.pyp | youdiaozi/cinema4d_py_sdk_extended | 2ac9af05d450bda3351c776a3a75e0993c01a5cc | [
"Apache-2.0"
] | 1 | 2019-12-27T13:53:18.000Z | 2019-12-27T13:53:18.000Z | plugins/py-xample_loader_r15/py-xample_loader_r15.pyp | ZeusbasePython/cinema4d_py_sdk_extended | 2ac9af05d450bda3351c776a3a75e0993c01a5cc | [
"Apache-2.0"
] | null | null | null | plugins/py-xample_loader_r15/py-xample_loader_r15.pyp | ZeusbasePython/cinema4d_py_sdk_extended | 2ac9af05d450bda3351c776a3a75e0993c01a5cc | [
"Apache-2.0"
] | 1 | 2020-01-28T15:16:28.000Z | 2020-01-28T15:16:28.000Z | """
Copyright: MAXON Computer GmbH
Author: XXX, Maxime Adam
Description:
- Creates a Bitmap Loader to import a custom picture format into Cinema 4D.
Notes:
- The Bitmap Exporter corresponding to the type can be found in the sdk under the py-xample_saver folder.
- File format used is the next one:
        - 6 bytes: the "XAMPLE" identifier string
        - 3 integers: bit depth, width, height
        - remaining bytes until end of file: bz2-compressed pixel data, one byte per component (red, green, blue)
Class/method highlighted:
- c4d.plugins.BitmapLoaderData
- BitmapLoaderData.Identify()
- BitmapLoaderData.Load()
Compatible:
- Win / Mac
- R13, R14, R15, R16, R17, R18, R19, R20, R21
"""
import c4d
import struct
import bz2
# Be sure to use a unique ID obtained from www.plugincafe.com
PLUGIN_ID = 1025255

# Display name of the loader, and the magic identifier written at the start
# of every *.xample file (checked in MyXampleLoader.Identify).
BMP_NAME = "Py-XAMPLE"
BMP_IDENTIFIER = "XAMPLE"
class MyXampleLoader(c4d.plugins.BitmapLoaderData):
    """Data class to import a *.xample file"""

    def Identify(self, name, probe, size):
        """
        Called by Cinema 4D, to identify your file type (know if this Bitmap Loader can be used with the current Bitmap)

        :param name: The name of the file.
        :type name: str
        :param probe: The start of data from the file currently tested.
        :type probe: buffer
        :param size: The size of the probe for testing this file type.
        :type size: int
        :return: True if the plugin can load this file.
        """
        # Checks if image starts with identifier flag
        return probe[:len(BMP_IDENTIFIER)] == BMP_IDENTIFIER

    def Load(self, name, bm, frame):
        """
        Called by Cinema 4D, when the plugin should load the file as a BaseBitmap

        :param name: The name of the file.
        :type name: str
        :param bm: The Bitmap, to be filled with the data (need to be initialized).
        :type bm: c4d.bitmaps.BaseBitmap
        :param frame: The current frame number for file format containing picture sequence (Quicktime, AVI...)
        :type frame: int
        :return: IMAGERESULT
        """
        # Opens the file in binary read mode
        with open(name, "rb") as fn:
            # Skips identifier
            lines = fn.read()[len(BMP_IDENTIFIER):]

        # Calculates the bits size of 3 int and 3 char
        intBitsSize = struct.calcsize("iii")
        chatBitsSize = struct.calcsize("ccc")

        # Extracts bit depth, width and height information
        bt, width, height = struct.unpack("iii", lines[:intBitsSize])

        # Initialize the bitmap with the information provided
        if bm.Init(width, height, bt) != c4d.IMAGERESULT_OK:
            raise MemoryError("Failed to initialize the BaseBitmap.")

        # Removes the offset so we can start with position 0 of the pixel information
        lines = lines[intBitsSize:]

        # Decompress to raw data
        lines = bz2.decompress(lines)

        # Iterates each lines to fill the BaseBitmap
        for x in xrange(width):
            # Iterates each row
            for y in xrange(height):
                # Retrieves memory position according current x and y pixels
                fr = (y * width * chatBitsSize) + (x * chatBitsSize)

                # Extracts red, green, blue information
                r, g, b = struct.unpack("ccc", lines[fr:fr+chatBitsSize])

                # Assigns pixel value for x, y pixel
                bm[x, y] = ord(r), ord(g), ord(b)

        return c4d.IMAGERESULT_OK
if __name__ == "__main__":
    # Registers the bitmap loader plugin so Cinema 4D can open *.xample files.
    c4d.plugins.RegisterBitmapLoaderPlugin(id=PLUGIN_ID,
                                           str=BMP_NAME,
                                           info=0,
                                           dat=MyXampleLoader())
| 36.166667 | 121 | 0.598054 |
ace664784a21b434a22890c80eaf2ba7c4494a7a | 12,788 | py | Python | elastalert/create_index.py | JamesJJ/elastalert | ccd6bb2e6b994bdf4567263fdb2152c04db98028 | [
"Apache-2.0"
] | null | null | null | elastalert/create_index.py | JamesJJ/elastalert | ccd6bb2e6b994bdf4567263fdb2152c04db98028 | [
"Apache-2.0"
] | null | null | null | elastalert/create_index.py | JamesJJ/elastalert | ccd6bb2e6b994bdf4567263fdb2152c04db98028 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import getpass
import os
import time
import json
import elasticsearch.helpers
import yaml
from auth import Auth
from elasticsearch import RequestsHttpConnection
from elasticsearch.client import Elasticsearch
from elasticsearch.client import IndicesClient
from elasticsearch.exceptions import NotFoundError
from envparse import Env
env = Env(ES_USE_SSL=bool)
def main(es_client, ea_index, recreate=False, old_ea_index=None):
    """Create (or re-create) the ElastAlert writeback indices and mappings.

    :param es_client: Connected Elasticsearch client.
    :param ea_index: Base name of the ElastAlert writeback index.
    :param recreate: When True, delete any existing indices and rebuild them;
        otherwise an existing index aborts the operation.
    :param old_ea_index: When given, reindex all documents from this old
        index into the newly created one.
    """
    esversion = es_client.info()["version"]["number"]
    print("Elastic Version: " + esversion)

    # ES >= 6 supports only one mapping type per index, so separate index
    # (and mapping file) sets are used per major version.
    es_index_mappings = read_es_index_mappings() if is_atleastsix(esversion) else read_es_index_mappings(5)

    es_index = IndicesClient(es_client)
    if not recreate:
        if es_index.exists(ea_index):
            print('Index ' + ea_index + ' already exists. Skipping index creation.')
            return None

    # (Re-)Create indices.
    if is_atleastsix(esversion):
        # One index per document type on ES >= 6.
        index_names = (
            ea_index,
            ea_index + '_status',
            ea_index + '_silence',
            ea_index + '_error',
            ea_index + '_past',
        )
    else:
        # A single multi-type index on ES < 6.
        index_names = (
            ea_index,
        )

    for index_name in index_names:
        if es_index.exists(index_name):
            print('Deleting index ' + index_name + '.')
            try:
                es_index.delete(index_name)
            except NotFoundError:
                # Why does this ever occur?? It shouldn't. But it does.
                pass
        es_index.create(index_name)

    # To avoid a race condition. TODO: replace this with a real check
    time.sleep(2)

    if is_atleastseven(esversion):
        # TODO remove doc_type completely when elasicsearch client allows doc_type=None
        # doc_type is a deprecated feature and will be completely removed in Elasicsearch 8
        es_client.indices.put_mapping(index=ea_index, doc_type='_doc',
                                      body=es_index_mappings['elastalert'], include_type_name=True)
        es_client.indices.put_mapping(index=ea_index + '_status', doc_type='_doc',
                                      body=es_index_mappings['elastalert_status'], include_type_name=True)
        es_client.indices.put_mapping(index=ea_index + '_silence', doc_type='_doc',
                                      body=es_index_mappings['silence'], include_type_name=True)
        es_client.indices.put_mapping(index=ea_index + '_error', doc_type='_doc',
                                      body=es_index_mappings['elastalert_error'], include_type_name=True)
        es_client.indices.put_mapping(index=ea_index + '_past', doc_type='_doc',
                                      body=es_index_mappings['past_elastalert'], include_type_name=True)
    elif is_atleastsixtwo(esversion):
        # ES 6.2 - 6.x: '_doc' type without include_type_name.
        es_client.indices.put_mapping(index=ea_index, doc_type='_doc',
                                      body=es_index_mappings['elastalert'])
        es_client.indices.put_mapping(index=ea_index + '_status', doc_type='_doc',
                                      body=es_index_mappings['elastalert_status'])
        es_client.indices.put_mapping(index=ea_index + '_silence', doc_type='_doc',
                                      body=es_index_mappings['silence'])
        es_client.indices.put_mapping(index=ea_index + '_error', doc_type='_doc',
                                      body=es_index_mappings['elastalert_error'])
        es_client.indices.put_mapping(index=ea_index + '_past', doc_type='_doc',
                                      body=es_index_mappings['past_elastalert'])
    elif is_atleastsix(esversion):
        # ES 6.0/6.1: named document types, still one per index.
        es_client.indices.put_mapping(index=ea_index, doc_type='elastalert',
                                      body=es_index_mappings['elastalert'])
        es_client.indices.put_mapping(index=ea_index + '_status', doc_type='elastalert_status',
                                      body=es_index_mappings['elastalert_status'])
        es_client.indices.put_mapping(index=ea_index + '_silence', doc_type='silence',
                                      body=es_index_mappings['silence'])
        es_client.indices.put_mapping(index=ea_index + '_error', doc_type='elastalert_error',
                                      body=es_index_mappings['elastalert_error'])
        es_client.indices.put_mapping(index=ea_index + '_past', doc_type='past_elastalert',
                                      body=es_index_mappings['past_elastalert'])
    else:
        # ES < 6: all document types live in the single base index.
        es_client.indices.put_mapping(index=ea_index, doc_type='elastalert',
                                      body=es_index_mappings['elastalert'])
        es_client.indices.put_mapping(index=ea_index, doc_type='elastalert_status',
                                      body=es_index_mappings['elastalert_status'])
        es_client.indices.put_mapping(index=ea_index, doc_type='silence',
                                      body=es_index_mappings['silence'])
        es_client.indices.put_mapping(index=ea_index, doc_type='elastalert_error',
                                      body=es_index_mappings['elastalert_error'])
        es_client.indices.put_mapping(index=ea_index, doc_type='past_elastalert',
                                      body=es_index_mappings['past_elastalert'])

    print('New index %s created' % ea_index)

    if old_ea_index:
        print("Copying all data from old index '{0}' to new index '{1}'".format(old_ea_index, ea_index))
        # Use the defaults for chunk_size, scroll, scan_kwargs, and bulk_kwargs
        elasticsearch.helpers.reindex(es_client, old_ea_index, ea_index)

    print('Done!')
def read_es_index_mappings(es_version=6):
    """Load every ElastAlert index mapping for the given ES major version."""
    print('Reading Elastic {0} index mappings:'.format(es_version))
    mapping_names = (
        'silence',
        'elastalert_status',
        'elastalert',
        'past_elastalert',
        'elastalert_error',
    )
    return {name: read_es_index_mapping(name, es_version)
            for name in mapping_names}
def read_es_index_mapping(mapping, es_version=6):
    """Read one mapping definition JSON file shipped next to this module."""
    module_dir = os.path.abspath(os.path.dirname(__file__))
    relative = 'es_mappings/{0}/{1}.json'.format(es_version, mapping)
    with open(os.path.join(module_dir, relative), 'r') as handle:
        print("Reading index mapping '{0}'".format(relative))
        return json.load(handle)
def is_atleastsix(es_version):
    """Return True when *es_version* (e.g. "6.3.1") is Elasticsearch 6+."""
    major_text = es_version.partition(".")[0]
    return int(major_text) >= 6
def is_atleastsixtwo(es_version):
    """Return True when *es_version* is Elasticsearch 6.2 or newer.

    Robustness fix: the previous tuple-unpack raised ValueError for a version
    string without a minor component (e.g. "7"); a missing minor now counts
    as 0, which is backward compatible for normal "X.Y.Z" strings.
    """
    parts = es_version.split(".")
    major = int(parts[0])
    minor = int(parts[1]) if len(parts) > 1 else 0
    return major > 6 or (major == 6 and minor >= 2)
def is_atleastseven(es_version):
    """Return True when the dotted version string is ES major version 7 or newer."""
    major_part = es_version.split(".")[0]
    return int(major_part) >= 7
if __name__ == '__main__':
    # CLI entry point: gather Elasticsearch connection settings from
    # arguments, a YAML config file, or interactive prompts, then create
    # (or recreate) the ElastAlert writeback index via main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', default=os.environ.get('ES_HOST', None), help='Elasticsearch host')
    parser.add_argument('--port', default=os.environ.get('ES_PORT', None), type=int, help='Elasticsearch port')
    parser.add_argument('--username', default=os.environ.get('ES_USERNAME', None), help='Elasticsearch username')
    parser.add_argument('--password', default=os.environ.get('ES_PASSWORD', None), help='Elasticsearch password')
    parser.add_argument('--url-prefix', help='Elasticsearch URL prefix')
    parser.add_argument('--no-auth', action='store_const', const=True, help='Suppress prompt for basic auth')
    parser.add_argument('--ssl', action='store_true', default=env('ES_USE_SSL', None), help='Use TLS')
    parser.add_argument('--no-ssl', dest='ssl', action='store_false', help='Do not use TLS')
    parser.add_argument('--verify-certs', action='store_true', default=None, help='Verify TLS certificates')
    parser.add_argument('--no-verify-certs', dest='verify_certs', action='store_false',
                        help='Do not verify TLS certificates')
    parser.add_argument('--index', help='Index name to create')
    parser.add_argument('--old-index', help='Old index name to copy')
    parser.add_argument('--send_get_body_as', default='GET',
                        help='Method for querying Elasticsearch - POST, GET or source')
    parser.add_argument(
        '--boto-profile',
        default=None,
        dest='profile',
        help='DEPRECATED: (use --profile) Boto profile to use for signing requests')
    parser.add_argument(
        '--profile',
        default=None,
        help='AWS profile to use for signing requests. Optionally use the AWS_DEFAULT_PROFILE environment variable')
    parser.add_argument(
        '--aws-region',
        default=None,
        help='AWS Region to use for signing requests. Optionally use the AWS_DEFAULT_REGION environment variable')
    parser.add_argument('--timeout', default=60, type=int, help='Elasticsearch request timeout')
    parser.add_argument('--config', default='config.yaml', help='Global config file (default: config.yaml)')
    # NOTE(review): argparse's type=bool treats ANY non-empty string as True,
    # so "--recreate false" still recreates the index -- confirm intent.
    parser.add_argument('--recreate', type=bool, default=False,
                        help='Force re-creation of the index (this will cause data loss).')
    args = parser.parse_args()

    # Use the config file only if it actually exists on disk.
    if os.path.isfile(args.config):
        filename = args.config
    else:
        filename = ''

    if filename:
        # Config-file mode: YAML supplies defaults, CLI args take precedence.
        with open(filename) as config_file:
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input -- consider yaml.safe_load.
            data = yaml.load(config_file)
        host = args.host if args.host else data.get('es_host')
        port = args.port if args.port else data.get('es_port')
        username = args.username if args.username else data.get('es_username')
        password = args.password if args.password else data.get('es_password')
        url_prefix = args.url_prefix if args.url_prefix is not None else data.get('es_url_prefix', '')
        use_ssl = args.ssl if args.ssl is not None else data.get('use_ssl')
        verify_certs = args.verify_certs if args.verify_certs is not None else data.get('verify_certs') is not False
        aws_region = data.get('aws_region', None)
        send_get_body_as = data.get('send_get_body_as', 'GET')
        ca_certs = data.get('ca_certs')
        client_cert = data.get('client_cert')
        client_key = data.get('client_key')
        index = args.index if args.index is not None else data.get('writeback_index')
        old_index = args.old_index if args.old_index is not None else None
    else:
        # Interactive mode: prompt for anything not given on the CLI.
        # NOTE(review): raw_input indicates this script targets Python 2.
        username = args.username if args.username else None
        password = args.password if args.password else None
        aws_region = args.aws_region
        host = args.host if args.host else raw_input('Enter Elasticsearch host: ')
        port = args.port if args.port else int(raw_input('Enter Elasticsearch port: '))
        use_ssl = (args.ssl if args.ssl is not None
                   else raw_input('Use SSL? t/f: ').lower() in ('t', 'true'))
        if use_ssl:
            verify_certs = (args.verify_certs if args.verify_certs is not None
                            else raw_input('Verify TLS certificates? t/f: ').lower() not in ('f', 'false'))
        else:
            verify_certs = True
        if args.no_auth is None and username is None:
            username = raw_input('Enter optional basic-auth username (or leave blank): ')
            password = getpass.getpass('Enter optional basic-auth password (or leave blank): ')
        url_prefix = (args.url_prefix if args.url_prefix is not None
                      else raw_input('Enter optional Elasticsearch URL prefix (prepends a string to the URL of every request): '))
        send_get_body_as = args.send_get_body_as
        ca_certs = None
        client_cert = None
        client_key = None
        index = args.index if args.index is not None else raw_input('New index name? (Default elastalert_status) ')
        if not index:
            index = 'elastalert_status'
        old_index = (args.old_index if args.old_index is not None
                     else raw_input('Name of existing index to copy? (Default None) '))

    timeout = args.timeout
    # Auth() resolves basic auth vs AWS request signing based on what is set.
    auth = Auth()
    http_auth = auth(host=host,
                     username=username,
                     password=password,
                     aws_region=aws_region,
                     profile_name=args.profile)
    es = Elasticsearch(
        host=host,
        port=port,
        timeout=timeout,
        use_ssl=use_ssl,
        verify_certs=verify_certs,
        connection_class=RequestsHttpConnection,
        http_auth=http_auth,
        url_prefix=url_prefix,
        send_get_body_as=send_get_body_as,
        client_cert=client_cert,
        ca_certs=ca_certs,
        client_key=client_key)

    main(es_client=es, ea_index=index, recreate=args.recreate, old_ea_index=old_index)
| 49.374517 | 130 | 0.644198 |
ace6648c9ddba6c0eab959ea5c87620c4192bcd4 | 5,626 | py | Python | web_app/settings.py | kamyarkalhor/chopen | 505e1b927245e601aec140d0e61a9c6785700304 | [
"MIT"
] | null | null | null | web_app/settings.py | kamyarkalhor/chopen | 505e1b927245e601aec140d0e61a9c6785700304 | [
"MIT"
] | null | null | null | web_app/settings.py | kamyarkalhor/chopen | 505e1b927245e601aec140d0e61a9c6785700304 | [
"MIT"
] | null | null | null | """
Django settings for Chopen project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os

# Repository root: two directories up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded and committed to source control;
# consider loading it from an environment variable for production deploys.
SECRET_KEY = 'd0vy02-g#nq@lg!s%5v$w(jilj@af791#1-3k9y7ea3c)djj!w'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Hosts/domains this Django site may serve.
ALLOWED_HOSTS = ['chopen.herokuapp.com', 'localhost', '127.0.0.1']
# Application definition

# Django contrib apps plus the project's local apps.
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'web_app',
    'digitizer',
    'ion_channel',
    'account',
    'channelworm',
    # 'predychannel',
    'api',
    # 'scholar',
    'formtools',
    'rest_framework',
)

# Request/response middleware, applied in order.
MIDDLEWARE = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'web_app.urls'

# Template engine configuration: per-app template dirs are listed
# explicitly in addition to APP_DIRS discovery.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR,'web_app', 'templates'),
                 os.path.join(BASE_DIR,'ion_channel', 'templates'),
                 os.path.join(BASE_DIR,'digitizer', 'templates'),
                 os.path.join(BASE_DIR,'channelworm', 'templates'),
                 os.path.join(BASE_DIR,'account', 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.media',
            ],
        },
    },
]
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators

# Standard Django password strength validators.
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Disable irritating django.db.backends logging
# LOGGING = {
#     'version': 1,
#     'disable_existing_loggers': False,
#     'handlers': {
#         'null': {
#             'level': 'DEBUG',
#             'class':'django.utils.log.NullHandler',
#         },
#     },
#     'loggers': {
#         'django.db.backends': {
#             'handlers': ['null'],  # Quiet by default!
#             'propagate': False,
#             'level':'DEBUG',
#         },
#     },
# }

WSGI_APPLICATION = 'web_app.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases

# CHANNELWORM_DB = os.path.join(BASE_DIR, 'channelworm/db.sqlite3')
# Single SQLite database at the repository root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
    # 'channelworm': {
    #     'NAME': 'CHANNELWORM_DB',
    #     'ENGINE': 'django.db.backends.sqlite3',
    #     'USER': 'channelworm',
    #     'PASSWORD': 'veryPriv@ate'
    # }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

STATIC_URL = '/static/'

# Pycharm detected this
# PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
# TEMPLATE_DIRS = (
#     os.path.join(PROJECT_ROOT, 'templates').replace('\\', '/'),
# )
# TEMPLATE_LOADERS = (
#     'django.template.loaders.filesystem.Loader',
#     'django.template.loaders.app_directories.Loader',
# )

# Target directory for `collectstatic`.
STATIC_ROOT = os.path.join(BASE_DIR, 'wsgi','static')
# STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'static')

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(BASE_DIR, 'web_app', 'static'),
    os.path.join(BASE_DIR, 'ion_channel', 'static'),
    os.path.join(BASE_DIR, 'channelworm', 'static'),
    os.path.join(BASE_DIR, 'digitizer', 'static'),
)

# User-uploaded media.
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media')
MEDIA_URL = '/media/'

# Where to send users after a successful login.
LOGIN_REDIRECT_URL = '/'

# initialize as empty so we can do `settings.configure()`
DEFAULT_INDEX_TABLESPACE = ''
DEFAULT_TABLESPACE = ''
ABSOLUTE_URL_OVERRIDES = {}
| 28.704082 | 91 | 0.657128 |
ace66563d75597c9a8e6e3952501be67b7892a1a | 15,811 | py | Python | src/anki/decks.py | jlitven/vexer | bc3b836771795acbba64f492b5bfb731adf91674 | [
"MIT"
] | 1 | 2016-03-28T09:54:06.000Z | 2016-03-28T09:54:06.000Z | src/anki/decks.py | jlitven/vexer | bc3b836771795acbba64f492b5bfb731adf91674 | [
"MIT"
] | null | null | null | src/anki/decks.py | jlitven/vexer | bc3b836771795acbba64f492b5bfb731adf91674 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright: Damien Elmes <anki@ichi2.net>
# License: GNU AGPL, version 3 or later; http://www.gnu.org/licenses/agpl.html
import copy
from anki.utils import intTime, ids2str, json
from anki.hooks import runHook
from anki.consts import *
from anki.lang import _
from anki.errors import DeckRenameError
# fixmes:
# - make sure users can't set grad interval < 1
# Template dict for a newly created standard deck; daily counters are
# [dayNumber, count] pairs reset when the day rolls over.
defaultDeck = {
    'newToday': [0, 0],  # currentDay, count
    'revToday': [0, 0],
    'lrnToday': [0, 0],
    'timeToday': [0, 0],  # time in ms
    'conf': 1,
    'usn': 0,
    'desc': "",
    'dyn': 0,  # anki uses int/bool interchangably here
    'collapsed': False,
    # added in beta11
    'extendNew': 10,
    'extendRev': 50,
}
# Template dict for a filtered ("dynamic"/cram) deck; unlike standard
# decks its scheduling options are embedded rather than referenced via
# a 'conf' id.
defaultDynamicDeck = {
    'newToday': [0, 0],
    'revToday': [0, 0],
    'lrnToday': [0, 0],
    'timeToday': [0, 0],
    'collapsed': False,
    'dyn': 1,
    'desc': "",
    'usn': 0,
    'delays': None,
    'separate': True,
    # list of (search, limit, order); we only use first element for now
    'terms': [["", 100, 0]],
    'resched': True,
    'return': True,  # currently unused
}
# Template for a deck configuration group ("options group") shared by
# one or more standard decks.
defaultConf = {
    'name': _("Default"),
    'new': {
        'delays': [1, 10],
        'ints': [1, 4, 7],  # 7 is not currently used
        'initialFactor': 2500,
        'separate': True,
        'order': NEW_CARDS_DUE,
        'perDay': 20,
        # may not be set on old decks
        'bury': True,
    },
    'lapse': {
        'delays': [10],
        'mult': 0,
        'minInt': 1,
        'leechFails': 8,
        # type 0=suspend, 1=tagonly
        'leechAction': 0,
    },
    'rev': {
        'perDay': 100,
        'ease4': 1.3,
        'fuzz': 0.05,
        'minSpace': 1,  # not currently used
        'ivlFct': 1,
        'maxIvl': 36500,
        # may not be set on old decks
        'bury': True,
    },
    'maxTaken': 60,
    'timer': 0,
    'autoplay': True,
    'replayq': True,
    'mod': 0,
    'usn': 0,
}
class DeckManager(object):
    """Manages the deck registry and deck configurations ("dconf").

    Decks and configs are stored as JSON blobs in the collection's `col`
    table; this class keeps them in memory as dicts keyed by STRINGIFIED
    ids (self.decks / self.dconf) and writes them back on flush().
    """

    # Registry save/load
    #############################################################

    def __init__(self, col):
        # col is the enclosing collection object.
        self.col = col

    def load(self, decks, dconf):
        """Load deck/config registries from their JSON string forms."""
        self.decks = json.loads(decks)
        self.dconf = json.loads(dconf)
        # set limits to within bounds
        found = False
        for c in self.dconf.values():
            for t in ('rev', 'new'):
                pd = 'perDay'
                if c[t][pd] > 999999:
                    c[t][pd] = 999999
                    self.save(c)
                    found = True
        if not found:
            self.changed = False

    def save(self, g=None):
        "Can be called with either a deck or a deck configuration."
        if g:
            g['mod'] = intTime()
            g['usn'] = self.col.usn()
        self.changed = True

    def flush(self):
        # Persist both registries to the col table if anything changed.
        if self.changed:
            self.col.db.execute("update col set decks=?, dconf=?",
                                json.dumps(self.decks),
                                json.dumps(self.dconf))
            self.changed = False

    # Deck save/load
    #############################################################

    def id(self, name, create=True, type=defaultDeck):
        "Add a deck with NAME. Reuse deck if already exists. Return id as int."
        name = name.replace('"', '')
        for id, g in self.decks.items():
            if g['name'].lower() == name.lower():
                return int(id)
        if not create:
            return None
        g = copy.deepcopy(type)
        if "::" in name:
            # not top level; ensure all parents exist
            name = self._ensureParents(name)
        g['name'] = name
        # millisecond timestamps double as unique ids; loop until unused
        while 1:
            id = intTime(1000)
            if str(id) not in self.decks:
                break
        g['id'] = id
        self.decks[str(id)] = g
        self.save(g)
        self.maybeAddToActive()
        runHook("newDeck")
        return int(id)

    def rem(self, did, cardsToo=False, childrenToo=True):
        "Remove the deck. If cardsToo, delete any cards inside."
        if str(did) == '1':
            # we won't allow the default deck to be deleted, but if it's a
            # child of an existing deck then it needs to be renamed
            deck = self.get(did)
            if '::' in deck['name']:
                deck['name'] = _("Default")
                self.save(deck)
            return
        # log the removal regardless of whether we have the deck or not
        self.col._logRem([did], REM_DECK)
        # do nothing else if doesn't exist
        if not str(did) in self.decks:
            return
        deck = self.get(did)
        if deck['dyn']:
            # deleting a cramming deck returns cards to their previous deck
            # rather than deleting the cards
            self.col.sched.emptyDyn(did)
            if childrenToo:
                for name, id in self.children(did):
                    self.rem(id, cardsToo)
        else:
            # delete children first
            if childrenToo:
                # we don't want to delete children when syncing
                for name, id in self.children(did):
                    self.rem(id, cardsToo)
            # delete cards too?
            if cardsToo:
                # don't use cids(), as we want cards in cram decks too
                cids = self.col.db.list(
                    "select id from cards where did=? or odid=?", did, did)
                self.col.remCards(cids)
        # delete the deck and add a grave
        del self.decks[str(did)]
        # ensure we have an active deck
        if did in self.active():
            # NOTE(review): keys()[0] only works on Python 2 dicts; on
            # Python 3 dict_keys is not subscriptable -- confirm runtime.
            self.select(int(self.decks.keys()[0]))
        self.save()

    def allNames(self, dyn=True):
        "An unsorted list of all deck names."
        if dyn:
            return [x['name'] for x in self.decks.values()]
        else:
            return [x['name'] for x in self.decks.values() if not x['dyn']]

    def all(self):
        "A list of all decks."
        return self.decks.values()

    def allIds(self):
        # Registry keys are stringified ids.
        return self.decks.keys()

    def collapse(self, did):
        """Toggle the deck's collapsed state in the deck list."""
        deck = self.get(did)
        deck['collapsed'] = not deck['collapsed']
        self.save(deck)

    def collapseBrowser(self, did):
        """Toggle the deck's collapsed state in the browser sidebar."""
        deck = self.get(did)
        collapsed = deck.get('browserCollapsed', False)
        deck['browserCollapsed'] = not collapsed
        self.save(deck)

    def count(self):
        """Number of decks in the registry."""
        return len(self.decks)

    def get(self, did, default=True):
        """Return the deck dict for did; fall back to the default deck
        (id 1) when `default` is true, otherwise return None implicitly."""
        id = str(did)
        if id in self.decks:
            return self.decks[id]
        elif default:
            return self.decks['1']

    def byName(self, name):
        "Get deck with NAME."
        for m in self.decks.values():
            if m['name'] == name:
                return m

    def update(self, g):
        "Add or update an existing deck. Used for syncing and merging."
        self.decks[str(g['id'])] = g
        self.maybeAddToActive()
        # mark registry changed, but don't bump mod time
        self.save()

    def rename(self, g, newName):
        "Rename deck prefix to NAME if not exists. Updates children."
        # make sure target node doesn't already exist
        if newName in self.allNames():
            raise DeckRenameError(_("That deck already exists."))
        # ensure we have parents
        newName = self._ensureParents(newName)
        # make sure we're not nesting under a filtered deck
        if '::' in newName:
            newParent = '::'.join(newName.split('::')[:-1])
            if self.byName(newParent)['dyn']:
                raise DeckRenameError(_("A filtered deck cannot have subdecks."))
        # rename children
        for grp in self.all():
            if grp['name'].startswith(g['name'] + "::"):
                grp['name'] = grp['name'].replace(g['name']+ "::",
                                                  newName + "::", 1)
                self.save(grp)
        # adjust name
        g['name'] = newName
        # ensure we have parents again, as we may have renamed parent->child
        newName = self._ensureParents(newName)
        self.save(g)
        # renaming may have altered active did order
        self.maybeAddToActive()

    def renameForDragAndDrop(self, draggedDeckDid, ontoDeckDid):
        """Handle a deck-list drag&drop: dropping onto the root promotes
        the deck to top level; otherwise nest it under the target."""
        draggedDeck = self.get(draggedDeckDid)
        draggedDeckName = draggedDeck['name']
        ontoDeckName = self.get(ontoDeckDid)['name']
        if ontoDeckDid == None or ontoDeckDid == '':
            if len(self._path(draggedDeckName)) > 1:
                self.rename(draggedDeck, self._basename(draggedDeckName))
        elif self._canDragAndDrop(draggedDeckName, ontoDeckName):
            draggedDeck = self.get(draggedDeckDid)
            draggedDeckName = draggedDeck['name']
            ontoDeckName = self.get(ontoDeckDid)['name']
            self.rename(draggedDeck, ontoDeckName + "::" + self._basename(draggedDeckName))

    def _canDragAndDrop(self, draggedDeckName, ontoDeckName):
        # Forbid no-op drops, dropping onto the current parent, and
        # dropping a deck into one of its own descendants.
        if draggedDeckName == ontoDeckName \
            or self._isParent(ontoDeckName, draggedDeckName) \
            or self._isAncestor(draggedDeckName, ontoDeckName):
            return False
        else:
            return True

    def _isParent(self, parentDeckName, childDeckName):
        # True when child is exactly one level below parent.
        return self._path(childDeckName) == self._path(parentDeckName) + [ self._basename(childDeckName) ]

    def _isAncestor(self, ancestorDeckName, descendantDeckName):
        # True when ancestor's path is a prefix of descendant's path.
        ancestorPath = self._path(ancestorDeckName)
        return ancestorPath == self._path(descendantDeckName)[0:len(ancestorPath)]

    def _path(self, name):
        # Deck hierarchy is encoded in the name with '::' separators.
        return name.split("::")

    def _basename(self, name):
        # The last component of a '::'-separated deck name.
        return self._path(name)[-1]

    def _ensureParents(self, name):
        "Ensure parents exist, and return name with case matching parents."
        s = ""
        path = self._path(name)
        if len(path) < 2:
            return name
        for p in path[:-1]:
            if not s:
                s += p
            else:
                s += "::" + p
            # fetch or create
            did = self.id(s)
            # get original case
            s = self.name(did)
        name = s + "::" + path[-1]
        return name

    # Deck configurations
    #############################################################

    def allConf(self):
        "A list of all deck config."
        return self.dconf.values()

    def confForDid(self, did):
        """Return the scheduling configuration for a deck; for dynamic
        decks the deck dict itself doubles as its config."""
        deck = self.get(did, default=False)
        assert deck
        if 'conf' in deck:
            conf = self.getConf(deck['conf'])
            conf['dyn'] = False
            return conf
        # dynamic decks have embedded conf
        return deck

    def getConf(self, confId):
        """Look up a configuration by id."""
        return self.dconf[str(confId)]

    def updateConf(self, g):
        """Add or replace a configuration and mark the registry dirty."""
        self.dconf[str(g['id'])] = g
        self.save()

    def confId(self, name, cloneFrom=defaultConf):
        "Create a new configuration and return id."
        c = copy.deepcopy(cloneFrom)
        while 1:
            id = intTime(1000)
            if str(id) not in self.dconf:
                break
        c['id'] = id
        c['name'] = name
        self.dconf[str(id)] = c
        self.save(c)
        return id

    def remConf(self, id):
        "Remove a configuration and update all decks using it."
        assert int(id) != 1
        self.col.modSchema(check=True)
        del self.dconf[str(id)]
        for g in self.all():
            # ignore cram decks
            if 'conf' not in g:
                continue
            if str(g['conf']) == str(id):
                g['conf'] = 1
                self.save(g)

    def setConf(self, grp, id):
        """Point deck `grp` at configuration `id`."""
        grp['conf'] = id
        self.save(grp)

    def didsForConf(self, conf):
        """All deck ids using the given configuration."""
        dids = []
        for deck in self.decks.values():
            if 'conf' in deck and deck['conf'] == conf['id']:
                dids.append(deck['id'])
        return dids

    def restoreToDefault(self, conf):
        """Reset a configuration to factory defaults, keeping id/name."""
        oldOrder = conf['new']['order']
        new = copy.deepcopy(defaultConf)
        new['id'] = conf['id']
        new['name'] = conf['name']
        self.dconf[str(conf['id'])] = new
        self.save(new)
        # if it was previously randomized, resort
        if not oldOrder:
            self.col.sched.resortConf(new)

    # Deck utils
    #############################################################

    def name(self, did, default=False):
        """Deck name for did, or a placeholder when missing."""
        deck = self.get(did, default=default)
        if deck:
            return deck['name']
        return _("[no deck]")

    def nameOrNone(self, did):
        """Deck name for did, or None when the deck doesn't exist."""
        deck = self.get(did, default=False)
        if deck:
            return deck['name']
        return None

    def setDeck(self, cids, did):
        """Move the given card ids into deck did."""
        self.col.db.execute(
            "update cards set did=?,usn=?,mod=? where id in "+
            ids2str(cids), did, self.col.usn(), intTime())

    def maybeAddToActive(self):
        # reselect current deck, or default if current has disappeared
        c = self.current()
        self.select(c['id'])

    def cids(self, did, children=False):
        """Card ids in deck did, optionally including child decks."""
        if not children:
            return self.col.db.list("select id from cards where did=?", did)
        dids = [did]
        for name, id in self.children(did):
            dids.append(id)
        return self.col.db.list("select id from cards where did in "+
                                ids2str(dids))

    def recoverOrphans(self):
        """Move cards whose deck no longer exists into the default deck,
        without bumping the db modification time."""
        dids = self.decks.keys()
        mod = self.col.db.mod
        self.col.db.execute("update cards set did = 1 where did not in "+
                            ids2str(dids))
        self.col.db.mod = mod

    # Deck selection
    #############################################################

    def active(self):
        "The currrently active dids. Make sure to copy before modifying."
        return self.col.conf['activeDecks']

    def selected(self):
        "The currently selected did."
        return self.col.conf['curDeck']

    def current(self):
        """The currently selected deck dict."""
        return self.get(self.selected())

    def select(self, did):
        "Select a new branch."
        # make sure arg is an int
        did = int(did)
        # current deck
        self.col.conf['curDeck'] = did
        # and active decks (current + all children)
        actv = self.children(did)
        actv.sort()
        self.col.conf['activeDecks'] = [did] + [a[1] for a in actv]
        self.changed = True

    def children(self, did):
        "All children of did, as (name, id)."
        name = self.get(did)['name']
        actv = []
        for g in self.all():
            if g['name'].startswith(name + "::"):
                actv.append((g['name'], g['id']))
        return actv

    def parents(self, did):
        "All parents of did."
        # get parent and grandparent names
        parents = []
        for part in self.get(did)['name'].split("::")[:-1]:
            if not parents:
                parents.append(part)
            else:
                parents.append(parents[-1] + "::" + part)
        # convert to objects
        for c, p in enumerate(parents):
            parents[c] = self.get(self.id(p))
        return parents

    # Sync handling
    ##########################################################################

    def beforeUpload(self):
        """Zero all update sequence numbers prior to a full sync upload."""
        for d in self.all():
            d['usn'] = 0
        for c in self.allConf():
            c['usn'] = 0
        self.save()

    # Dynamic decks
    ##########################################################################

    def newDyn(self, name):
        "Return a new dynamic deck and set it as the current deck."
        did = self.id(name, type=defaultDynamicDeck)
        self.select(did)
        return did

    def isDyn(self, did):
        # Truthy (1) for filtered decks, falsy (0) for standard ones.
        return self.get(did)['dyn']
| 31.622 | 106 | 0.515464 |
ace665fac1c8da80c1d3e65c03a2bc6603d38433 | 1,579 | py | Python | config.py | pathespe/MarkerBot | 942ce3f89c79e494f54e70a06f92be0d8a6c31e7 | [
"MIT"
] | null | null | null | config.py | pathespe/MarkerBot | 942ce3f89c79e494f54e70a06f92be0d8a6c31e7 | [
"MIT"
] | 21 | 2017-06-01T04:13:30.000Z | 2018-03-21T00:40:33.000Z | config.py | pathespe/MarkerBot | 942ce3f89c79e494f54e70a06f92be0d8a6c31e7 | [
"MIT"
] | 4 | 2017-07-07T06:37:04.000Z | 2019-08-15T08:22:14.000Z | from dotenv import load_dotenv
import os
import urllib
# Load environment variables from the .env file next to this module,
# then pull the AWS credentials used for SQS/Celery (may be None when
# the variables are unset).
dotenv_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '.env'))
load_dotenv(dotenv_path)

AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_REGION = os.environ.get('AWS_REGION')
class Config(object):
    """Base Flask configuration shared by all environments.

    SECRET_KEY and DATABASE_URL are read with os.environ[...] and will
    raise KeyError at import time when missing -- intentional fail-fast.
    """
    DEBUG = False
    TESTING = False
    CSRF_ENABLED = True
    UPLOAD_FOLDER = os.getenv('UPLOAD_FOLDER')
    MAX_CONTENT_LENGTH = 2 * 1024 * 1024  # cap uploads at 2 MiB
    SECRET_KEY = os.environ['SECRET_KEY']
    SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    # Celery result backend stored in Postgres via SQLAlchemy.
    CELERY_BACKEND = 'db+postgresql+psycopg2://{0}'.format(os.environ.get('DB'))
class ProductionConfig(Config):
    """Production settings: Celery broker is AWS SQS.

    NOTE(review): urllib.quote is Python 2 API (urllib.parse.quote on
    Python 3) -- confirm the target interpreter.
    """
    DEBUG = False
    BROKER_TRANSPORT_OPTIONS = {'region': AWS_REGION,
                                'visibility_timeout': 43200,
                                'polling_interval': 2,
                                'queue_name_prefix': 'celery-markerbot'}
    # Credentials are URL-encoded into the sqs:// broker URL.
    CELERY_BROKER_URL = 'sqs://%s:%s@' % (urllib.quote(AWS_ACCESS_KEY_ID, safe=''),
                                          urllib.quote(AWS_SECRET_ACCESS_KEY, safe=''))
class StagingConfig(Config):
    """Staging settings: debug enabled, otherwise inherits Config."""
    DEVELOPMENT = True
    DEBUG = True
class DevelopmentConfig(Config):
    """Local development settings: debug on, local RabbitMQ broker."""
    DEVELOPMENT = True
    DEBUG = True
    PRESERVE_CONTEXT_ON_EXCEPTION = False
    CELERY_BROKER_URL = 'amqp://localhost//'
class TestingConfig(Config):
    """Test-suite settings: testing flag on, local RabbitMQ broker."""
    TESTING = True
    CSRF_ENABLED = True
    PRESERVE_CONTEXT_ON_EXCEPTION = False
    CELERY_BROKER_URL = 'amqp://localhost//'
| 32.22449 | 87 | 0.668778 |
ace66673eda07d5b2ebd1a4ad389ce4195c89644 | 15,767 | py | Python | BGP_Forecast_Modules/What_If_Analysis_Modules/Conflict_Identifier.py | xinyuwang1209/BGP_Forecast_Modules | 8ecaee2f3e7bc40ed56acc0350e4e051bf751233 | [
"MIT"
] | 1 | 2019-04-08T05:13:28.000Z | 2019-04-08T05:13:28.000Z | BGP_Forecast_Modules/What_If_Analysis_Modules/Conflict_Identifier.py | xinyuwang1209/BGP_Forecast_Modules | 8ecaee2f3e7bc40ed56acc0350e4e051bf751233 | [
"MIT"
] | null | null | null | BGP_Forecast_Modules/What_If_Analysis_Modules/Conflict_Identifier.py | xinyuwang1209/BGP_Forecast_Modules | 8ecaee2f3e7bc40ed56acc0350e4e051bf751233 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import os
import sys
import ast
import json
import time
import requests
import numpy as np
import pandas as pd
import ipaddress as ip
import multiprocessing as mp
from urllib.parse import quote
# Import Utilities
from ..Utilities.Database import *
from ..Utilities.Utilities import *
class Conflict_Identifier:
    def __init__(self,config):
        """Store the parsed configuration and prepare (lazy) ROA state."""
        # self.url = 'http://localhost:8080/api/bgp/validity?'
        # # self.url = 'http://localhost:8080/api/v2/validity/AS12654/93.175.146.0/24'
        # self.announcements = None
        # self.anno_size = 0
        # self.nprocess = nprocess
        # self.partition_mult = partition_mult
        # self.file = None
        # self.html = None
        # self.prefix_origin_table_name = prefix_origin_table_name
        # self.announcement_table_name = announcement_table_name
        # request = None
        # self.partition = []
        # self.header = {'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:57.0) Gecko/20100101 Firefox/57.0',
        #                "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
        #                }
        self.config = config
        # Table name Initialization
        self.tablename = config['TABLES']
        # Database Initialization
        # self.roas is populated later by init_ROAs().
        self.roas = None
        return
    def get_ROAs(self,mode=4):
        """Read the ROA table from the database as a pandas DataFrame.

        Prefix strings are parsed into ipaddress network objects; with
        mode=4 (the default) only IPv4 ROAs are kept.
        """
        database = init_db(self.config['DATABASE']['path_confidential'],use_dict=False)
        sql = "SELECT * FROM " + self.config['TABLES']['roas'] + ";"
        roas = pd.read_sql_query(sql,con=database)
        database.close()
        roas['prefix'] = roas['prefix'].apply(ip.ip_network)
        if mode == 4:
            roas = roas.loc[roas['prefix'].apply(get_network_version) == 4]
        return roas
def get_asn_list(self):
connection = init_db(self.config['DATABASE']['path_confidential'])
# sql = "SELECT DISTINCT asn FROM " + self.config['TABLES']['extrapolation_results'] + ";"
# df = pd.read_sql_query(sql,con=database)
cursor = connection.cursor()
sql = "SELECT DISTINCT asn FROM " + self.config['TABLES']['extrapolation_asns'] + ";"
cursor.execute(sql)
df = cursor.fetchall()
return [a['asn'] for a in df]
def init_ROAs(self):
self.roas = self.get_ROAs(mode=4)
return
def local_validation(self,row):
# print("print row")
# print(row)
prefix,origin = row['prefix'],row['origin']
wroa,invalid_length,invalid_asn = row['wroa'],row['invalid_length'],row['invalid_asn']
current_roas = self.roas[self.roas['prefix'].apply(prefix.subnet_of)]
if current_roas.shape[0] == 0:
wroa = False
invalid_length = False
invalid_asn = False
else:
wroa = True
valid = False
for index,roas_row in current_roas.iterrows():
roas_prefix, roas_origin, max_length = roas_row['prefix'], roas_row['origin'], roas_row['max_length']
# Check whether invalid by asn
if roas_origin != origin:
invalid_asn = True
else:
invalid_asn = False
# Check whether invalid by max length
if max_length < prefix.prefixlen:
invalid_length = True
else:
invalid_legnth = False
# Remember if both passed
if invalid_length == False and invalid_length == False:
valid = True
if valid:
invalid_length == False and invalid_length == False
# row['wroa'] = wroa
# row['invalid_asn'] = invalid_asn
# row['invalid_length'] = invalid_length
return {'wroa': wroa, 'invalid_asn': invalid_asn, 'invalid_length': invalid_length}
    def get_prefix_origin_query(self,asn):
        """Fetch (prefix, origin, received_from_asn) rows observed by *asn*
        from the extrapolation results table, as a DataFrame."""
        query_asn = str(asn)
        connection = init_db(self.config['DATABASE']['path_confidential'])
        sql = "SELECT prefix,origin,received_from_asn FROM " + self.config['TABLES']['extrapolation_results'] + " WHERE asn=" + query_asn + ";"
        df = pd.read_sql_query(sql,con=connection)
        # df['prefix'] = df['prefix_origin'].apply(lambda x: x.split("-")[0])
        # df['origin'] = df['prefix_origin'].apply(lambda x: int(x.split("-")[1]))
        connection.close()
        return df
    def get_prefix_origin_dump(self):
        """Download RIPE RIS's IPv4 whois dump and return (prefix, origin) rows.

        Rows whose origin field is a set literal (e.g. "{1,2}") are expanded
        into one row per origin before being appended back.
        """
        url = "http://www.ris.ripe.net/dumps/riswhoisdump.IPv4.gz"
        df = pd.read_table(url,compression='gzip',sep='\t',header=15,names=['origin','prefix','seen-peer'])
        df = df[['prefix','origin']]
        # The dump ends with a non-data footer row; drop it if present.
        if type(df.iloc[-1,0]) is not str:
            df = df.iloc[:-1,:]
        print("Size of prefix origin pairs is:", df.shape[0])
        # Split out the multi-origin rows for expansion.
        special = df[df['origin'].str.contains('{')]
        df = df[~df['origin'].str.contains('{')]
        lst = []
        for index, row in special.iterrows():
            prefix, origins = row['prefix'], ast.literal_eval(row['origin'])
            for origin in origins:
                # df2 = pd.concat([df2, pd.DataFrame([prefix,origin],columns=['prefix', 'origin'])])
                lst.append([prefix,origin])
        df = pd.concat([df, pd.DataFrame(lst,columns=['prefix','origin'])])
        return df[['prefix','origin']]
    def validation(self,wroa,invalid_length,invalid_asn):
        # TODO: unimplemented stub -- always returns None.
        return
    def init_unique_prefix_origin_table(self):
        # TODO: unimplemented stub.
        pass
    def init_prefix_origin_table(self,asn,table,table_new):
        """(Re)create *table_new* as a copy of *table* filtered to one ASN.

        NOTE(review): table/asn are interpolated into the SQL string;
        acceptable only because they come from internal config/queries.
        """
        # Then add table
        sql = '''DROP TABLE IF EXISTS ''' + table_new + ''';
                 CREATE TABLE IF NOT EXISTS ''' + table_new + '''
                 AS SELECT * FROM ''' + table + '''
                 WHERE asn=''' + str(asn) + ";"
        self.sql_operation(sql)
        return
# def init_prefix_origin_table(self,asn=0):
# # Get table name
# # Default name is prefix_origin
# connection = init_db(self.config['DATABASE']['path_confidential'],use_dict=True)
# table_prefix_origin = self.config['TABLES']['prefix_origin_distinct']
# if asn != 0:
# table_prefix_origin = table_prefix_origin + '_' + str(asn)
# cursor = connection.cursor()
# # Drop current table if exists
# # Then add table
# sql = '''DROP TABLE IF EXISTS ''' + table_prefix_origin + ''';
# CREATE TABLE IF NOT EXISTS ''' + table_prefix_origin + ''' (
# ''' + table_prefix_origin + '''_id serial PRIMARY KEY,
# prefix cidr,
# origin bigint,
# wroa boolean DEFAULT false,
# invalid_length boolean DEFAULT false,
# inbalid_asn boolean DEFAULT false,
# hijack boolean);'''
# cursor.execute(sql)
# connection.commit()
# connection.close()
# return
    def store_prefix_origin(self,df):
        """Write *df* to the distinct prefix-origin table, replacing any
        existing contents (pandas to_sql with if_exists='replace')."""
        # connection = init_db(self.config['DATABASE']['path_confidential'],use_dict=True)
        table_prefix_origin = self.config['TABLES']['prefix_origin_distinct']
        engine = DB(self.config['DATABASE']['path_confidential']).generate_engine()
        df.to_sql(table_prefix_origin,con=engine,if_exists='replace')
        return
        # file = df.to_dict('records')
def get_announcements_size(self):
connection = init_db(self.config['DATABASE']['path_confidential'],use_dict=True)
cursor = connection.cursor()
table_prefix_origin = self.config['TABLES']['prefix_origin_distinct']
cursor.execute("SELECT COUNT(1) FROM " + table_prefix_origin + ";")
po_size = self.cursor.fetchall()[0]['count']
print("The total size of the announcement are:", self.anno_size)
connection.commit()
connection.close()
return po_size
def alter_prefix_origin_columns(self, table, asn=0):
    """Bring *table* up to the expected prefix-origin schema.

    For the global table (``asn == 0``) the existing prefix/origin columns
    are retyped to cidr/bigint; for a per-ASN table they are added if
    missing.  Both variants add the validation flag columns and drop the
    obsolete ann_id/priority columns.

    Fix: the original read self.config['DEFAULT']['date_format'] into a
    local variable that was never used; the dead config lookup has been
    removed.
    """
    if asn == 0:
        # Global table: columns already exist, only their types change.
        sql = "ALTER TABLE " + table + '''
            ALTER COLUMN prefix TYPE cidr USING prefix::cidr,
            ALTER COLUMN origin TYPE bigint USING origin::bigint,
            ADD COLUMN IF NOT EXISTS invalid_length boolean default false,
            ADD COLUMN IF NOT EXISTS invalid_asn boolean default false,
            ADD COLUMN IF NOT EXISTS hijack boolean default false,
            ADD COLUMN IF NOT EXISTS first_seen timestamp default now(),
            DROP COLUMN IF EXISTS ann_id,
            DROP COLUMN IF EXISTS priority;'''
    else:
        # Per-ASN table: the prefix/origin columns may not exist yet.
        sql = "ALTER TABLE " + table + '''
            ADD COLUMN IF NOT EXISTS prefix cidr,
            ADD COLUMN IF NOT EXISTS origin bigint,
            ADD COLUMN IF NOT EXISTS invalid_length boolean default false,
            ADD COLUMN IF NOT EXISTS invalid_asn boolean default false,
            ADD COLUMN IF NOT EXISTS hijack boolean default false,
            ADD COLUMN IF NOT EXISTS first_seen timestamp default now(),
            DROP COLUMN IF EXISTS ann_id,
            DROP COLUMN IF EXISTS priority;'''
    self.sql_operation(sql)
    return
def create_index(self, table, asn=0, prefixorigin=False):
    """Recreate the lookup index on *table*.

    With ``prefixorigin=True`` the index covers the combined
    ``prefix_origin`` column, otherwise the (prefix, origin) pair.
    The *asn* parameter is currently unused; it is kept for signature
    parity with the other per-ASN helpers.
    """
    if prefixorigin:
        index_name = table + "_po_index"
        columns = "(prefix_origin)"
    else:
        index_name = table + "_p_o_index"
        columns = "(prefix, origin)"
    sql = (
        "DROP INDEX IF EXISTS " + index_name + ";\n"
        "CREATE INDEX " + index_name + " ON " + table + " " + columns + ";"
    )
    self.sql_operation(sql)
    return
def split_prefix_origin(self, asn, table):
    """Populate the prefix/origin columns from the combined prefix_origin column.

    ``prefix_origin`` holds values of the form ``<prefix>-<origin>``; this
    splits them into the typed ``prefix`` (cidr) and ``origin`` (bigint)
    columns.  *asn* is unused and kept only for signature parity.
    """
    update_stmt = (
        "UPDATE " + table + "\n"
        "SET prefix = split_part(" + table + ".prefix_origin, '-', 1)::cidr,\n"
        "origin = split_part(" + table + ".prefix_origin, '-', 2)::bigint;"
    )
    self.sql_operation(update_stmt)
    return
def drop_prefix_origin(self, asn, table):
    """Drop the combined prefix_origin column from *table* if present.

    Called after split_prefix_origin has copied the data into the typed
    columns.  *asn* is unused and kept only for signature parity.
    """
    statement = f"ALTER TABLE {table} DROP COLUMN IF EXISTS prefix_origin;"
    self.sql_operation(statement)
    return
def sql_operation(self, sql, connection=None):
    """Execute *sql* on *connection* (or a freshly opened one) and commit.

    If *connection* is None a new connection is opened from the
    confidential database config and closed again afterwards; a
    caller-supplied connection is left open for the caller to manage.

    Bug fix: the original flag logic was inverted -- it closed
    caller-supplied connections (which the caller still owned) and leaked
    every internally created connection.
    """
    owns_connection = connection is None
    if owns_connection:
        connection = init_db(self.config['DATABASE']['path_confidential'], use_dict=False)
    cursor = connection.cursor()
    cursor.execute(sql)
    connection.commit()
    # Only close connections this method opened itself.
    if owns_connection:
        connection.close()
    return
def init_po_distinct(self):
    """Build the distinct prefix-origin analysis table from simplified_elements.

    Steps: (1) recreate the table with element_id, prefix and the first
    AS-path hop, (2) rename that hop column to ``origin``, (3) add the
    ROA flag columns, (4) index on prefix, and (5) mark every row whose
    prefix is covered by some ROA in the ``roas`` table.
    """
    table_source = 'simplified_elements'
    # (1) Recreate the analysis table from the source announcements;
    # as_path[1] is the first hop of the AS path.
    sql = '''
        DROP TABLE IF EXISTS ''' + self.tablename['po_analysis_distinct'] + ''';
        CREATE TABLE ''' + self.tablename['po_analysis_distinct'] + '''
        AS SELECT element_id,prefix,as_path[1] FROM ''' + table_source + ";"
    self.cursor.execute(sql)
    self.conn.commit()
    # (2) The selected as_path[1] column inherits the name "as_path";
    # rename it to its real meaning.
    sql = '''
        ALTER TABLE ''' + self.tablename['po_analysis_distinct'] + '''
        RENAME COLUMN as_path TO origin;'''
    self.cursor.execute(sql)
    self.conn.commit()
    # (3) Flag columns filled in by later validation passes.
    sql = '''
        ALTER TABLE ''' + self.tablename['po_analysis_distinct'] + '''
        ADD COLUMN wroa boolean default false,
        ADD COLUMN invalid_length boolean default false,
        ADD COLUMN invalid_asn boolean default false,
        ADD COLUMN first_seen date;'''
    self.cursor.execute(sql)
    self.conn.commit()
    # (4) Prefix index for the containment join below.
    # NOTE(review): the index name is the table name concatenated with the
    # configured suffix -- confirm this matches the naming used elsewhere.
    sql = '''
        CREATE INDEX ''' + self.tablename['po_analysis_distinct'] + self.tablename['po_analysis_distinct_index'] + '''
        ON ''' + self.tablename['po_analysis_distinct'] + " (prefix);"
    self.cursor.execute(sql)
    self.conn.commit()
    start_time = time.time()
    # (5) Check whether prefix-origin pair has roa: "<<=" is PostgreSQL's
    # "is contained within or equals" operator for inet/cidr.
    sql = "UPDATE " + self.tablename['po_analysis_distinct'] + '''
        SET wroa = true
        FROM roas
        WHERE prefix <<=roas.roa_prefix;'''
    self.cursor.execute(sql)
    self.conn.commit()
    print("Checking if exists roas",time.time()-start_time)
def validate_and_store(self, announcement):
    """Validate one announcement against the RPKI HTTP API and buffer the result.

    *announcement* must be a mapping with 'prefix' and 'origin' keys.
    The API's validity string is mapped onto the two-bit encoding used by
    store_partition_to_table ('00' invalid ASN, '01' invalid length,
    '10' unknown, '11' valid); malformed responses are logged and skipped.
    """
    prefix = announcement['prefix']
    origin = announcement['origin']
    url = self.url + 'prefix=' + quote(prefix, safe="") + '&asn=' + str(origin)
    html = requests.get(url, headers=self.header)
    try:
        result = html.json()['data']
    except (ValueError, KeyError):
        # Non-JSON body or missing 'data' key: log and skip this
        # announcement.  (Fix: narrowed the original bare except, and
        # removed the dead "validity = 'UNKNOWN'" assignment that was
        # immediately discarded by the return.)
        print(html)
        print(prefix, origin)
        return
    if result['validity'] == 'INVALID_ASN':
        validity = '00'
    elif result['validity'] == 'INVALID_LENGTH':
        validity = '01'
    elif result['validity'] == 'UNKNOWN':
        validity = '10'
    elif result['validity'] == 'VALID':
        validity = '11'
    else:
        print(prefix, origin, "NOT FOUND")
        return
    # NOTE(review): when run inside mp.Process this append happens in the
    # child's address space and is not visible to the parent -- confirm
    # the intended result-collection mechanism.
    self.partition.append([prefix, origin, validity])
    return
def validate_and_store_announcements(self):
    """Validate every stored announcement via the RPKI API using worker processes.

    Announcements are fetched from the database in windows of
    ``nprocess * partition_mult`` rows; one mp.Process per announcement is
    created, and the processes are started/joined in batches of nprocess.

    NOTE(review): validate_and_store appends to self.partition, but inside
    mp.Process that happens in the child process and is never seen by the
    parent -- confirm that results are actually collected.
    NOTE(review): at each window boundary store_partition_to_table() runs
    *before* the final batch of that window has been started/joined, so the
    flush ordering looks suspect -- verify against intended behaviour.
    """
    processes = []
    query_size = self.nprocess*self.partition_mult
    print('anno size:', self.anno_size)
    # Fetch the first window of announcements.
    sql = "SELECT * FROM " + self.announcement_table_name + " OFFSET " + str(0) + " LIMIT " + str(query_size) + ";"
    self.cursor.execute(sql)
    annos = self.cursor.fetchall()
    # print(annos)
    start_time = time.time()
    for i in range(self.anno_size):
        # One worker process per announcement; index wraps within the window.
        p = mp.Process(target=self.validate_and_store, args=[annos[i % query_size]])
        processes.append(p)
        if i == self.anno_size - 1 or (i+1) % query_size == 0:
            # Window boundary: flush buffered results and fetch the next window.
            self.store_partition_to_table()
            self.partition = []
            sql = "SELECT * FROM " + self.announcement_table_name + " OFFSET " + str(i+1) + " LIMIT " + str(query_size)
            self.cursor.execute(sql)
            annos = self.cursor.fetchall()
            print(i, '/', self.anno_size, 'completed.')
            print('Elapsed_time:', time.time() - start_time)
            start_time = time.time()
        if i == self.anno_size - 1 or (i+1) % self.nprocess == 0:
            # Batch boundary: run the accumulated worker processes.
            for p in processes:
                p.start()
            for p in processes:
                p.join()
            processes = []
    return
def store_partition_to_table(self):
    """Flush buffered [prefix, origin, validity] rows into prefix_origin.

    Only ROA-invalid rows are persisted: validity '00' (invalid ASN) and
    '01' (invalid length).  Valid/unknown rows are skipped.
    """
    for row_prefix, row_origin, row_validity in self.partition:
        if row_validity in ('00', '01'):
            self.cursor.execute('''INSERT INTO
                prefix_origin (po_prefix, po_origin, po_validity)
                VALUES(%s, %s, %s)''', (row_prefix, row_origin, row_validity))
    return
#sql = "SELECT prefix,origin," + self.announcement_table_name + ";"
#self.cursor.execute(sql)
#self.announcements = self.cursor.fetchall()
#print(len(self.announcements))
#return self.announcements
if __name__ == "__main__":
    # Ad-hoc driver: build the distinct prefix/origin analysis tables.
    # The commented-out calls below are earlier pipeline stages kept for
    # reference.
    Instance = Conflict_Identifier()
    Instance.init_po_distinct()
    Instance.init_po_analysis_distinct()
    # Instance.get_announcements()
    # Instance.validate_and_storing_announcements()
    # Instance.download_Invalid_ROAs()
    # Instance.store_Invalid_ROAs()
    # print(announcements[0])
    # counter = 0
    #for anno in announcements:
    #    result = Instance.validate_announcement(anno)
    #    if result == 1:
    #        counter += 1
    # print(counter)
| 39.027228 | 143 | 0.577409 |
ace66681b26478812f2583ea6da2c2fa6f7e21d3 | 11,883 | py | Python | tests/test_feature_encoders.py | metataro/sc2_imitation_learning | 8dca03e9be92e2d8297a4bc34248939af5c7ec3b | [
"MIT"
] | 15 | 2021-06-04T09:38:36.000Z | 2021-12-02T14:01:14.000Z | tests/test_feature_encoders.py | metataro/sc2_imitation_learning | 8dca03e9be92e2d8297a4bc34248939af5c7ec3b | [
"MIT"
] | 3 | 2021-08-20T13:39:13.000Z | 2022-03-26T03:25:35.000Z | tests/test_feature_encoders.py | metataro/sc2_imitation_learning | 8dca03e9be92e2d8297a4bc34248939af5c7ec3b | [
"MIT"
] | 2 | 2021-06-16T08:50:30.000Z | 2021-07-24T16:38:16.000Z | import numpy as np
import pysc2.lib.actions
import tensorflow as tf
from pysc2.env.sc2_env import Race
from pysc2.lib.features import Player
from pysc2.lib.static_data import UNIT_TYPES, ABILITIES
from pysc2.lib.upgrades import Upgrades
from sc2_imitation_learning.agents.common.feature_encoder import IdentityEncoder, PlayerEncoder, RaceEncoder, \
UpgradesEncoder, GameLoopEncoder, AvailableActionsEncoder, UnitCountsEncoder, ActionEncoder, ControlGroupEncoder, \
ProductionQueueEncoder, UnitSelectionEncoder, OneHotEncoder, ScaleEncoder, LogScaleEncoder, UnitTypeEncoder
from sc2_imitation_learning.agents.common.unit_group_encoder import mask_unit_group
from sc2_imitation_learning.common.conv import ConvNet2D, ConvNet1D
from sc2_imitation_learning.common.transformer import SC2EntityTransformerEncoder
class FeatureEncoderTest(tf.test.TestCase):
    """Unit tests for the SC2 observation/feature encoders.

    Each test builds a minimal raw feature tensor, runs it through the
    corresponding encoder and checks output dtype, shape and numerical
    sanity (no inf/NaN values).
    """

    def test_identity_encoder(self):
        # IdentityEncoder must return its input unchanged regardless of dtype.
        enc = IdentityEncoder()
        # test float tensor
        t = tf.constant([[0]], dtype=tf.float32)
        self.assertEqual(t, enc(t))
        # test uint8 tensor
        t = tf.constant([[0]], dtype=tf.uint8)
        self.assertEqual(t, enc(t))

    def test_player_encoder(self):
        # One value per pysc2 Player feature, embedded to a fixed-size vector.
        enc = PlayerEncoder(embedding_size=16)
        player_features = tf.constant([list(range(len(Player)))], dtype=tf.uint8)
        encoded_player_features = enc(player_features)
        self.assertEqual(encoded_player_features.dtype, tf.float32)
        self.assertEqual(encoded_player_features.shape.as_list(), [1, 16])
        self.assertEqual(tf.reduce_any(tf.math.is_inf(encoded_player_features)), False)
        self.assertEqual(tf.reduce_any(tf.math.is_nan(encoded_player_features)), False)

    def test_race_encoder(self):
        # Every race id must embed to a finite fixed-size vector.
        enc = RaceEncoder(embedding_size=16)
        for race in range(len(Race)):
            race_features = tf.constant([[race]], dtype=tf.uint16)
            encoded_race_features = enc(race_features)
            self.assertEqual(encoded_race_features.dtype, tf.float32)
            self.assertEqual(encoded_race_features.shape.as_list(), [1, 16])
            self.assertEqual(tf.reduce_any(tf.math.is_inf(encoded_race_features)), False)
            self.assertEqual(tf.reduce_any(tf.math.is_nan(encoded_race_features)), False)

    def test_upgrades_encoder(self):
        # Boolean upgrade vector with every second upgrade active.
        # NOTE(review): np.bool is removed in NumPy >= 1.24; this may need
        # np.bool_ (or plain bool) with newer NumPy versions.
        enc = UpgradesEncoder(embedding_size=16)
        raw_upgrades = np.full(shape=(1, len(Upgrades)), fill_value=False, dtype=np.bool)
        raw_upgrades[:, ::2] = True
        raw_upgrades = tf.constant(raw_upgrades)
        encoded_upgrades = enc(raw_upgrades)
        self.assertEqual(encoded_upgrades.dtype, tf.float32)
        self.assertEqual(encoded_upgrades.shape.as_list(), [1, 16])
        self.assertEqual(tf.reduce_any(tf.math.is_inf(encoded_upgrades)), False)
        self.assertEqual(tf.reduce_any(tf.math.is_nan(encoded_upgrades)), False)

    def test_game_loop_encoder(self):
        # Batch of three scalar game-loop counters.
        enc = GameLoopEncoder(embedding_size=16)
        raw_game_loop = tf.constant([[4], [8], [9]], dtype=tf.uint16)
        encoded_game_loop = enc(raw_game_loop)
        self.assertEqual(encoded_game_loop.dtype, tf.float32)
        self.assertEqual(encoded_game_loop.shape.as_list(), [3, 16])
        self.assertEqual(tf.reduce_any(tf.math.is_inf(encoded_game_loop)), False)
        self.assertEqual(tf.reduce_any(tf.math.is_nan(encoded_game_loop)), False)

    def test_available_actions_encoder(self):
        # Multi-hot vector over all pysc2 action functions with four enabled.
        enc = AvailableActionsEncoder(embedding_size=16)
        raw_available_actions = np.full(shape=(1, len(pysc2.lib.actions.FUNCTIONS)), fill_value=0, dtype=np.uint16)
        raw_available_actions[:, [0, 1, 3, 5]] = 1
        raw_available_actions = tf.constant(raw_available_actions, dtype=tf.uint16)
        encoded_available_actions = enc(raw_available_actions)
        self.assertEqual(encoded_available_actions.dtype, tf.float32)
        self.assertEqual(encoded_available_actions.shape.as_list(), [1, 16])
        self.assertEqual(tf.reduce_any(tf.math.is_inf(encoded_available_actions)), False)
        self.assertEqual(tf.reduce_any(tf.math.is_nan(encoded_available_actions)), False)

    def test_unit_counts_encoder(self):
        # Count vector over all unit types with four non-zero entries.
        enc = UnitCountsEncoder(embedding_size=16)
        raw_unit_counts = np.full(shape=(1, len(UNIT_TYPES)), fill_value=0, dtype=np.uint16)
        raw_unit_counts[:, [0, 1, 3, 5]] = [1, 2, 3, 4]
        raw_unit_counts = tf.constant(raw_unit_counts, dtype=tf.uint16)
        encoded_raw_unit_counts = enc(raw_unit_counts)
        self.assertEqual(encoded_raw_unit_counts.dtype, tf.float32)
        self.assertEqual(encoded_raw_unit_counts.shape.as_list(), [1, 16])
        self.assertEqual(tf.reduce_any(tf.math.is_inf(encoded_raw_unit_counts)), False)
        self.assertEqual(tf.reduce_any(tf.math.is_nan(encoded_raw_unit_counts)), False)

    def test_action_encoder(self):
        # -1 (no action) and out-of-range ids must still encode finitely.
        enc = ActionEncoder(num_actions=16, embedding_size=16)
        raw_action = tf.constant([-1, 15, 20], dtype=tf.int64)
        encoded_raw_action = enc(raw_action)
        self.assertEqual(encoded_raw_action.dtype, tf.float32)
        self.assertEqual(encoded_raw_action.shape.as_list(), [3, 16])
        self.assertEqual(tf.reduce_any(tf.math.is_inf(encoded_raw_action)), False)
        self.assertEqual(tf.reduce_any(tf.math.is_nan(encoded_raw_action)), False)

    def test_control_group_encoder(self):
        # Ten (unit_type, count) pairs encoded per control-group slot.
        enc = ControlGroupEncoder(encoder=ConvNet1D(
            output_channels=[16], kernel_shapes=[1], strides=[1], paddings=['SAME'], activate_final=True))
        raw_control_group = tf.constant([np.stack([
            np.asarray([UNIT_TYPES[i] for i in range(10)], dtype=np.uint16),
            np.arange(10, dtype=np.uint16)
        ], axis=-1)], dtype=tf.uint16)
        encoded_control_group = enc(raw_control_group)
        self.assertEqual(encoded_control_group.dtype, tf.float32)
        self.assertEqual(encoded_control_group.shape.as_list(), [1, 10, 16])
        self.assertEqual(tf.reduce_any(tf.math.is_inf(encoded_control_group)), False)
        self.assertEqual(tf.reduce_any(tf.math.is_nan(encoded_control_group)), False)

    def test_production_queue_encoder(self):
        # Sixteen (ability, position) pairs; tested full, partially masked
        # (length 10) and fully masked (empty queue).
        raw_control_group = tf.constant([np.stack([
            np.asarray([ABILITIES[i] for i in range(16)], dtype=np.uint16),
            np.arange(16, dtype=np.uint16)
        ], axis=-1)], dtype=tf.uint16)

        # test with full production queue
        enc = ProductionQueueEncoder(max_position=16, encoder=ConvNet1D(
            output_channels=[16], kernel_shapes=[1], strides=[1], paddings=['SAME'], activate_final=True))
        encoded_control_group = enc(raw_control_group)
        self.assertEqual(encoded_control_group.dtype, tf.float32)
        self.assertEqual(encoded_control_group.shape.as_list(), [1, 16, 16])
        self.assertEqual(tf.reduce_any(tf.math.is_inf(encoded_control_group)), False)
        self.assertEqual(tf.reduce_any(tf.math.is_nan(encoded_control_group)), False)

        # test with production queue of length 10: masked slots (>= 10)
        # must encode to all zeros, unmasked slots must not all be zero.
        enc = ProductionQueueEncoder(max_position=16, encoder=SC2EntityTransformerEncoder(
            num_layers=2, model_dim=32, num_heads=2, dff=64, mask_value=0))
        encoded_control_group = enc(mask_unit_group(raw_control_group, tf.constant([10], dtype=tf.int32), -1))
        self.assertEqual(encoded_control_group.dtype, tf.float32)
        self.assertEqual(encoded_control_group.shape.as_list(), [1, 16, 32])
        self.assertEqual(tf.reduce_any(tf.math.is_inf(encoded_control_group)), False)
        self.assertEqual(tf.reduce_any(tf.math.is_nan(encoded_control_group)), False)
        self.assertNotEqual(tf.reduce_sum(encoded_control_group[:, 0:]), 0)
        self.assertEqual(tf.reduce_sum(encoded_control_group[:, 10:]), 0)

        # test with empty production queue: everything masked, all zeros.
        encoded_control_group = enc(mask_unit_group(raw_control_group, tf.constant([0], dtype=tf.int32), -1))
        self.assertEqual(encoded_control_group.dtype, tf.float32)
        self.assertEqual(encoded_control_group.shape.as_list(), [1, 16, 32])
        self.assertEqual(tf.reduce_any(tf.math.is_inf(encoded_control_group)), False)
        self.assertEqual(tf.reduce_any(tf.math.is_nan(encoded_control_group)), False)
        self.assertEqual(tf.reduce_sum(encoded_control_group[:, 0:]), 0)

    def test_unit_selection_encoder(self):
        # Single selected unit with the seven per-unit selection features.
        enc = UnitSelectionEncoder(encoder=ConvNet1D(
            output_channels=[16], kernel_shapes=[1], strides=[1], paddings=['SAME'], activate_final=True))
        raw_control_group = tf.constant([[[
            UNIT_TYPES[0],  # unit_type
            0,  # player_relative
            100,  # health
            0,  # shields
            0,  # energy
            0,  # transport_slots_taken
            0,  # build_progress
        ]]], dtype=tf.uint16)
        encoded_control_group = enc(raw_control_group)
        self.assertEqual(encoded_control_group.dtype, tf.float32)
        self.assertEqual(encoded_control_group.shape.as_list(), [1, 1, 16])
        self.assertEqual(tf.reduce_any(tf.math.is_inf(encoded_control_group)), False)
        self.assertEqual(tf.reduce_any(tf.math.is_nan(encoded_control_group)), False)

    def test_one_hot_encoder(self):
        # Out-of-range values (-1, 20) must map to the all-zero vector.
        enc = OneHotEncoder(depth=16)
        raw_feature = tf.constant([-1, 15, 20], dtype=tf.int64)
        encoded_feature = enc(raw_feature)
        self.assertEqual(encoded_feature.dtype, tf.float32)
        self.assertEqual(encoded_feature.shape.as_list(), [3, 16])
        self.assertAllClose(encoded_feature, [[0]*16, ([0]*15) + [1], [0]*16])
        self.assertEqual(tf.reduce_any(tf.math.is_inf(encoded_feature)), False)
        self.assertEqual(tf.reduce_any(tf.math.is_nan(encoded_feature)), False)

    def test_scale_encoder(self):
        # Linear scaling by a constant factor.
        enc = ScaleEncoder(factor=0.5)
        raw_feature = tf.constant([-1, 0, 20], dtype=tf.int64)
        encoded_feature = enc(raw_feature)
        self.assertEqual(encoded_feature.dtype, tf.float32)
        self.assertEqual(encoded_feature.shape.as_list(), [3, 1])
        self.assertAllClose(encoded_feature, [[-0.5], [0.0], [10.0]])

    def test_log_scale_encoder(self):
        # log(1 + x) compression of non-negative counts.
        enc = LogScaleEncoder()
        raw_feature = tf.constant([0, 1, 20], dtype=tf.int64)
        encoded_feature = enc(raw_feature)
        self.assertEqual(encoded_feature.dtype, tf.float32)
        self.assertEqual(encoded_feature.shape.as_list(), [3, 1])
        self.assertAllClose(encoded_feature, [[0], [np.log(2)], [np.log(21)]])

    def test_unit_type_encoder(self):
        # 2x2 spatial unit-type map: zero cells (no unit) must encode to
        # zeros, non-zero cells to non-zero finite vectors.
        enc = UnitTypeEncoder(max_unit_count=2, encoder=ConvNet1D(
            output_channels=[16], kernel_shapes=[1], strides=[1], paddings=['SAME'], activate_final=True))
        raw_feature = tf.constant([
            [
                [UNIT_TYPES[0], 0],
                [0, UNIT_TYPES[1]]
            ],
            [
                [UNIT_TYPES[0], 0],
                [UNIT_TYPES[1], 0]
            ]
        ], dtype=tf.uint16)
        encoded_feature = enc(raw_feature)
        self.assertEqual(encoded_feature.dtype, tf.float32)
        self.assertEqual(encoded_feature.shape.as_list(), [2, 2, 2, 16])
        self.assertAllClose(encoded_feature[0, 0, 1], np.zeros((16,)))
        self.assertAllClose(encoded_feature[0, 1, 0], np.zeros((16,)))
        self.assertAllClose(encoded_feature[1, 0, 1], np.zeros((16,)))
        self.assertAllClose(encoded_feature[1, 1, 1], np.zeros((16,)))
        self.assertNotAllClose(encoded_feature[0, 0, 0], np.zeros((16,)))
        self.assertNotAllClose(encoded_feature[0, 1, 1], np.zeros((16,)))
        self.assertNotAllClose(encoded_feature[1, 0, 0], np.zeros((16,)))
        self.assertNotAllClose(encoded_feature[1, 1, 0], np.zeros((16,)))
        self.assertEqual(tf.reduce_any(tf.math.is_inf(encoded_feature)), False)
        self.assertEqual(tf.reduce_any(tf.math.is_nan(encoded_feature)), False)
| 45.528736 | 119 | 0.688042 |
ace6673abef1385a18062669859d4e1cdaa7ebc1 | 13,959 | py | Python | src/datasets/utils/filelock.py | WojciechKusa/datasets | 1406a04c3e911cec2680d8bc513653e0cafcaaa4 | [
"Apache-2.0"
] | 10,608 | 2020-09-10T15:47:50.000Z | 2022-03-31T22:51:47.000Z | src/datasets/utils/filelock.py | WojciechKusa/datasets | 1406a04c3e911cec2680d8bc513653e0cafcaaa4 | [
"Apache-2.0"
] | 2,396 | 2020-09-10T14:55:31.000Z | 2022-03-31T19:41:04.000Z | src/datasets/utils/filelock.py | WojciechKusa/datasets | 1406a04c3e911cec2680d8bc513653e0cafcaaa4 | [
"Apache-2.0"
] | 1,530 | 2020-09-10T21:43:10.000Z | 2022-03-31T01:59:12.000Z | # This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org>
"""
A platform independent file lock that supports the with-statement.
"""
# Modules
# ------------------------------------------------
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
warnings = None
try:
import msvcrt
except ImportError:
msvcrt = None
try:
import fcntl
except ImportError:
fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
__version__ = "3.0.12"
_logger = None


def logger():
    """Return the module-level logger, creating it on first use."""
    global _logger
    if _logger is None:
        _logger = logging.getLogger(__name__)
    return _logger
# Exceptions
# ------------------------------------------------
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired within *timeout* seconds."""

    def __init__(self, lock_file):
        """Remember which lock file could not be acquired."""
        #: The path of the file lock.
        self.lock_file = lock_file

    def __str__(self):
        return f"The file lock '{self.lock_file}' could not be acquired."
# Classes
# ------------------------------------------------
# This is a helper class which is returned by :meth:`BaseFileLock.acquire`
# and wraps the lock to make sure __enter__ is not called twice when entering
# the with statement.
# If we would simply return *self*, the lock would be acquired again
# in the *__enter__* method of the BaseFileLock, but not released again
# automatically.
#
# :seealso: issue #37 (memory leak)
class _Acquire_ReturnProxy:
def __init__(self, lock):
self.lock = lock
return None
def __enter__(self):
return self.lock
def __exit__(self, exc_type, exc_value, traceback):
self.lock.release()
return None
class BaseFileLock:
    """
    Implements the base class of a file lock.
    """

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        """Create a lock object for *lock_file*.

        *timeout* is the default acquire timeout (negative disables it);
        *max_filename_length* caps the lock file name length (255 when
        None) -- longer names are shortened via
        :meth:`hash_filename_if_too_long`.
        """
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        """
        The path to the lock file.
        """
        return self._lock_file

    @property
    def timeout(self):
        """
        You can set a default timeout for the filelock. It will be used as
        fallback value in the acquire method, if no timeout value (*None*) is
        given.

        If you want to disable the timeout, set it to a negative value.

        A timeout of 0 means, that there is exactly one attempt to acquire the
        file lock.

        .. versionadded:: 2.0.0
        """
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        """Set the default timeout, coercing *value* to float."""
        self._timeout = float(value)
        return None

    # Platform dependent locking
    # --------------------------------------------

    def _acquire(self):
        """
        Platform dependent. If the file lock could be
        acquired, self._lock_file_fd holds the file descriptor
        of the lock file.
        """
        raise NotImplementedError()

    def _release(self):
        """
        Releases the lock and sets self._lock_file_fd to None.
        """
        raise NotImplementedError()

    # Platform independent methods
    # --------------------------------------------

    @property
    def is_locked(self):
        """
        True, if the object holds the file lock.

        .. versionchanged:: 2.0.0

            This was previously a method and is now a property.
        """
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        """
        Acquires the file lock or fails with a :exc:`Timeout` error.

        .. code-block:: python

            # You can use this method in the context manager (recommended)
            with lock.acquire():
                pass

            # Or use an equivalent try-finally construct:
            lock.acquire()
            try:
                pass
            finally:
                lock.release()

        :arg float timeout:
            The maximum time waited for the file lock.

            If ``timeout < 0``, there is no timeout and this method will
            block until the lock could be acquired.

            If ``timeout`` is None, the default :attr:`~timeout` is used.

        :arg float poll_intervall:
            We check once in *poll_intervall* seconds if we can acquire the
            file lock.

        :raises Timeout:
            if the lock could not be acquired in *timeout* seconds.

        .. versionchanged:: 2.0.0

            This method returns now a *proxy* object instead of *self*,
            so that it can be used in a with statement without side effects.
        """
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            # Poll until _acquire succeeds or the timeout elapses.
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)

            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        """
        Releases the file lock.

        Please note, that the lock is only completely released, if the lock
        counter is 0.

        Also note, that the lock file itself is not automatically deleted.

        :arg bool force:
            If true, the lock counter is ignored and the lock is released in
            every case.
        """
        with self._thread_lock:

            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")

        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        # Force-release on garbage collection so a dropped lock object never
        # keeps the file locked.
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        """Shorten the basename of *path* to at most *max_length* characters.

        Over-long names are truncated and suffixed with ``...<hash>.lock``
        so the result remains unique per original filename.
        """
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
# Windows locking mechanism
# ~~~~~~~~~~~~~~~~~~~~~~~~~
class WindowsFileLock(BaseFileLock):
    """
    Uses the :func:`msvcrt.locking` function to hard lock the lock file on
    windows systems.
    """

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        # Project-local import (datasets.utils.file_utils), kept inside the
        # method to avoid an import cycle at module load time.
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        # The "\\?\" prefix opts into Windows extended-length path handling.
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            # Could not even open the file; leave the lock unacquired.
            pass
        else:
            try:
                # Non-blocking byte-range lock on the first byte.
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except (IOError, OSError):
                # Another process holds the lock; give the descriptor back.
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
# Unix locking mechanism
# ~~~~~~~~~~~~~~~~~~~~~~
class UnixFileLock(BaseFileLock):
    """
    Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.
    """

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        # On POSIX the real filename limit comes from the lock file's
        # filesystem, so query it; note this overrides (ignores) any
        # caller-supplied max_filename_length.
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            # Non-blocking exclusive lock on the whole file.
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except (IOError, OSError):
            # Lock is held elsewhere; give the descriptor back.
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
# Soft lock
# ~~~~~~~~~
class SoftFileLock(BaseFileLock):
    """
    Simply watches the existence of the lock file.
    """

    def _acquire(self):
        # O_EXCL makes the create-if-absent atomic: os.open fails if the
        # file already exists, i.e. if someone else holds the lock.
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except (IOError, OSError):
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# Platform filelock
# ~~~~~~~~~~~~~~~~~
#: Alias for the lock, which should be used for the current platform. On
#: Windows, this is an alias for :class:`WindowsFileLock`, on Unix for
#: :class:`UnixFileLock` and otherwise for :class:`SoftFileLock`.
# Pick the strongest lock implementation available on this platform:
# msvcrt (Windows) > fcntl (POSIX) > soft lock (existence-based fallback).
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")
| 29.202929 | 112 | 0.595745 |
ace6674c621c5b65dfe3b1dd87615ea0ea351a21 | 170 | py | Python | main.py | sskender/earthquake-analyzer | d048ea2423797587dd6d0c466b55257dfa4e835c | [
"MIT"
] | null | null | null | main.py | sskender/earthquake-analyzer | d048ea2423797587dd6d0c466b55257dfa4e835c | [
"MIT"
] | null | null | null | main.py | sskender/earthquake-analyzer | d048ea2423797587dd6d0c466b55257dfa4e835c | [
"MIT"
] | null | null | null | import tkinter
from src.gui_app import *
def main():
    """Create the Tk root window, attach the application and run the event loop."""
    window = tkinter.Tk()
    # Application is provided by the star import from src.gui_app; keep a
    # reference so the widget tree is not garbage collected.
    app = Application(window)
    window.mainloop()


if __name__ == "__main__":
    main()
ace667eac4d9251e009e04275a8490cd1d9bc158 | 1,144 | py | Python | src/language.py | masuP9/almanac.httparchive.org | 213fceef7d64a0b7386f72c7ad7cc58ef42b5095 | [
"Apache-2.0"
] | null | null | null | src/language.py | masuP9/almanac.httparchive.org | 213fceef7d64a0b7386f72c7ad7cc58ef42b5095 | [
"Apache-2.0"
] | 90 | 2020-07-22T06:15:37.000Z | 2022-03-31T17:03:07.000Z | src/language.py | sitedata/almanac.httparchive.org | 6df98ca9b6f2b47fe1e932af4f3ee611a80070d0 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
class _Language(object):
def __init__(self, local_name, lang_code, region_code):
self._local_name = local_name
self._lang_code = lang_code
self._region_code = region_code
def __eq__(self, other):
if isinstance(other, _Language):
return '%r' % self == '%r' % other
return False
def __str__(self):
return '%s' % (self._local_name)
def __repr__(self):
return '%s, %s' % (self.__str__(), self.lang_attribute)
#Currently this returns the same as lang_code as we don't support regions
@property
def lang_attribute(self):
return self._lang_code
@property
def lang_code(self):
return self._lang_code
# Currently we are only supporting languages and not regions
class Language(object):
    """Registry of the supported languages; each attribute is a _Language."""
    JA = _Language('日本語', 'ja', 'JP')
    EN = _Language('English', 'en', 'US')
    ES = _Language('Español', 'es', 'ES')
    FR = _Language('Français', 'fr', 'FR')
# Fallback language used when no supported language code matches.
DEFAULT_LANGUAGE = Language.EN

# Maps language codes to _Language objects.
# (Filters out dunder/private attributes of the Language registry class.)
language_map = {v.lang_code: v for k,v in Language.__dict__.items() if k[:1] != '_'}


def get_language(lang_code):
    """Return the _Language for *lang_code*, or None if it is unsupported."""
    return language_map.get(lang_code)
| 27.238095 | 84 | 0.688811 |
ace668242123306cd9bddc67ad3a95a4d8df0c7b | 2,366 | py | Python | tests/test_string_calculator.py | JFF-Bohdan/tdd-kata | 2b113d31368620d72d899e01b2e8ad6cbc399b02 | [
"MIT"
] | null | null | null | tests/test_string_calculator.py | JFF-Bohdan/tdd-kata | 2b113d31368620d72d899e01b2e8ad6cbc399b02 | [
"MIT"
] | null | null | null | tests/test_string_calculator.py | JFF-Bohdan/tdd-kata | 2b113d31368620d72d899e01b2e8ad6cbc399b02 | [
"MIT"
] | null | null | null | import pytest
from work_classes.string_calculator import StringCalculator
def test_empty_string(mocker):
    """Summing an empty string yields zero."""
    parse_mock = mocky_ret(mocker, [])
    empty_input = ""
    assert StringCalculator.add(empty_input) == 0
    parse_mock.assert_called_once_with(empty_input)
def test_one_value_sum(mocker):
    """A single number sums to itself."""
    parse_mock = mocky_ret(mocker, [1])
    raw_input = "1"
    assert StringCalculator.add(raw_input) == 1
    parse_mock.assert_called_once_with(raw_input)
def test_simple_input(mocker):
    """Two comma-separated numbers are summed."""
    parse_mock = mocky_ret(mocker, [1, 2])
    raw_input = "1,2"
    assert StringCalculator.add(raw_input) == 3
    parse_mock.assert_called_once_with(raw_input)
def test_unknown_amount_of_numbers(mocker):
    """Any number of values (with stray whitespace) can be summed."""
    parse_mock = mocky_ret(mocker, [1, 2, 3, 4])
    raw_input = "1,2, 3, 4 "
    assert StringCalculator.add(raw_input) == 10
    parse_mock.assert_called_once_with(raw_input)
def test_exceptions_for_negative_number(mocker):
    """A single negative number must raise, reporting the offending value."""
    mc = mocky_ret(mocker, [1, 2, -3])
    task_string = "//;\n1;2;-3"
    with pytest.raises(Exception) as context:
        StringCalculator.add(task_string)
    # Fix: pytest's ExceptionInfo exposes the raised exception as .value;
    # .exception only exists on unittest's assertRaises context manager.
    msg = str(context.value)
    wrong_values = context.value.wrong_data
    assert "negatives not allowed" in msg
    assert -3 in wrong_values
    # Fix: mc *is* the patched parse_input mock, so assert on it directly --
    # mc.parse_input.assert_called_once_with asserted on an auto-created
    # child mock that is never called.  Also fixed the task_strins typo.
    mc.assert_called_once_with(task_string)
def test_exception_for_negative_numbers_list(mocker):
    """Several negative numbers must all be reported in the exception."""
    mc = mocky_ret(mocker, [1, 2, -3, -4, -5])
    task_string = "//;\n1;2;-3;-4;-5"
    with pytest.raises(Exception) as context:
        StringCalculator.add(task_string)
    # Fix: use ExceptionInfo.value; the original accessed the nonexistent
    # .exception attribute (a unittest-ism) and would AttributeError.
    msg = str(context.value)
    wrong_values = context.value.wrong_data
    assert "negatives not allowed" in msg
    wrong_numbers = [-3, -4, -5]
    for value in wrong_numbers:
        assert value in wrong_values
    mc.assert_called_once_with(task_string)
def test_big_numbers_ignore(mocker):
    """Values greater than 1000 are excluded from the sum."""
    parse_mock = mocky_ret(mocker, [1, 2, 3, 4, 1500, 2000, 1001])
    raw_input = "1,2, 3, 4, 1500, 2000, 1001 "
    total = StringCalculator.add(raw_input)
    assert total == 10
    parse_mock.assert_called_once_with(raw_input)
def mocky_ret(mocker, mocked_return_value):
    """Patch the calculator's input parser to return *mocked_return_value*.

    Returns the patched mock so callers can assert on how it was invoked.
    """
    patched = mocker.patch("work_classes.string_calculator.StringCalculatorInputParser.parse_input")
    patched.return_value = mocked_return_value
    return patched
| 26 | 97 | 0.666103 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.