| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cboling/onos-restconf-providers
|
tools/mockDevice/yangModel.py
|
1
|
5799
|
#
# Copyright 2015-present Boling Consulting Solutions, bcsw.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from yinFile import YINFile
import os
class YangModel:
"""
This class wraps the yang model and helps to hide some of the ugly details needed to
get this code-generated RESTCONF server working.
"""
_extmethods = None
_yang_class = None
_yin = None
def __init__(self, yin_path, yin_file, model_dir, verbose=False):
"""
YANG model initializer
:param: yin_path (string) Directory path to the location of the YIN file (and python file)
:param: yin_file (string) Name of the YIN file generated by 'pyang'. The name of the code-generated
python file is extracted from the YIN file (module name).
:param: model_dir (string) Directory name of where the model is.
Used in constructing the import statement.
:param: verbose (integer) Flag indicating if verbose output is to be presented
"""
self._yin = YINFile(os.path.join(yin_path, yin_file), verbose)
self._verbose = verbose
# Import the model
self._import_models(model_dir)
def __str__(self):
return 'YangModel: %s' % self.name
@property
def name(self):
"""
Get the module name.
@:returns: (string) YANG Model name
"""
return self._yin.module_name
@property
def package_name(self):
"""
Get the code-generated package name. The pyangbind package will replace hyphens and
spaces with underscores.
@:returns: (string) Python code-generated module name
"""
return self.name.replace('-', '_').replace(' ', '_')
def _import_models(self, model_dir):
"""
This method is responsible for accessing the code-generated class and building up
a model that can be used to provide a simple RESTCONF server implementation of the
model.
:param: model_dir (string) Base directory name of where the model is.
"""
package = model_dir
module = self.package_name
_class = self.package_name
try:
if self._verbose > 0:
print 'Dynamic import -> from %s.%s import %s' % (package, module, _class)
yang_module = __import__('%s.%s' % (package, module), fromlist=[_class])
yang_class = getattr(yang_module, _class)
if self._verbose > 0:
print 'YANG class initially imported: %s' % yang_class
# Construct the extmethods for all appropriate nodes in the class and then
# reconstruct the class if needed with these methods
self._extmethods = self._yin.get_extmethods(yang_class().get())
# Now reconstruct the class and pass in these methods
if self._extmethods is not None and len(self._extmethods) > 0:
self._yang_class = getattr(yang_module, _class)(extmethods=self._extmethods)
else:
self._yang_class = yang_class
except ImportError:
print 'Import Error while attempting to import class %s from %s.%s' % (_class, package, module)
# Instantiate the models the first time so that we can generate all the paths within
# them and create the extension methods that provide the operations required
# by RESTCONF
###########################################################################
def _get_extmethods(self, element, path_base=''):
"""
A recursive function to convert a yang model into an extmethods dictionary
:param element: (list of YANGDynClass) Child elements of the model instance
:param path_base: (string) Base path of the parent element
:return: (dict) A Pyangbind compatible extmethods dictionary
"""
extmethods = {}
if element is None:
return extmethods
if isinstance(element, dict):
# print '%s is a dictionary of length %d' % (element, len(element))
# yang_name = getattr(element, "yang_name") if hasattr(element, "yang_name") else None
# is_container = hasattr(element, "get")
for key, value in element.items():
path = path_base + '/' + key
config = True
yang_name = getattr(value, "yang_name") if hasattr(value, "yang_name") else None
is_container = hasattr(value, "get")
try:
if value._is_leaf: # Protected, but I really need to know
config = value.flags.writeable
except AttributeError:
pass # Was another dictionary item or did not have a writeable flag
# Add this to our path
extmethods[path] = config
extmethods.update(self._get_extmethods(value, path_base=path))
return extmethods
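# Hypothetical illustration (not produced anywhere in this file): for a model with a
# container 'interfaces' holding a list 'interface' whose 'name' leaf is read-only,
# the dictionary returned by _get_extmethods above would look roughly like
#
#     {
#         '/interfaces': True,
#         '/interfaces/interface': True,
#         '/interfaces/interface/name': False,
#     }
#
# i.e. each descendant path mapped to a flag saying whether that node is writeable.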
def _fix_extmethods(self, extmethods):
"""
Walk through the methods and fix up any parents that have no children that
are writeable.
"""
# TODO: Need to implement
return extmethods
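# Hedged usage sketch (not part of the original module; the file and package names
# below are assumptions):
#
#     model = YangModel('/tmp/yin', 'my-model.yin', 'models', verbose=True)
#     print 'Loaded %s -> python package %s' % (model.name, model.package_name)
#
# which would dynamically import models.my_model and wrap it for the mock RESTCONF
# device.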
|
apache-2.0
| 7,545,779,531,491,814,000
| 36.173077
| 108
| 0.59562
| false
| 4.443678
| false
| false
| false
|
google-research/google-research
|
tft/libs/hyperparam_opt.py
|
1
|
13875
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Classes used for hyperparameter optimisation.
Two main classes exist:
1) HyperparamOptManager used for optimisation on a single machine/GPU.
2) DistributedHyperparamOptManager for multiple GPUs on different machines.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import shutil
import libs.utils as utils
import numpy as np
import pandas as pd
Deque = collections.deque
class HyperparamOptManager:
"""Manages hyperparameter optimisation using random search for a single GPU.
Attributes:
param_ranges: Discrete hyperparameter range for random search.
results: Dataframe of validation results.
fixed_params: Fixed model parameters per experiment.
saved_params: Dataframe of parameters trained.
best_score: Minimum validation loss observed thus far.
optimal_name: Key to best configuration.
hyperparam_folder: Where to save optimisation outputs.
"""
def __init__(self,
param_ranges,
fixed_params,
model_folder,
override_w_fixed_params=True):
"""Instantiates model.
Args:
param_ranges: Discrete hyperparameter range for random search.
fixed_params: Fixed model parameters per experiment.
model_folder: Folder to store optimisation artifacts.
override_w_fixed_params: Whether to override serialised fixed model
parameters with new supplied values.
"""
self.param_ranges = param_ranges
self._max_tries = 1000
self.results = pd.DataFrame()
self.fixed_params = fixed_params
self.saved_params = pd.DataFrame()
self.best_score = np.Inf
self.optimal_name = ""
# Setup
# Create folder for saving if it's not there
self.hyperparam_folder = model_folder
utils.create_folder_if_not_exist(self.hyperparam_folder)
self._override_w_fixed_params = override_w_fixed_params
def load_results(self):
"""Loads results from previous hyperparameter optimisation.
Returns:
A boolean indicating if previous results can be loaded.
"""
print("Loading results from", self.hyperparam_folder)
results_file = os.path.join(self.hyperparam_folder, "results.csv")
params_file = os.path.join(self.hyperparam_folder, "params.csv")
if os.path.exists(results_file) and os.path.exists(params_file):
self.results = pd.read_csv(results_file, index_col=0)
self.saved_params = pd.read_csv(params_file, index_col=0)
if not self.results.empty:
self.results.at["loss"] = self.results.loc["loss"].apply(float)
self.best_score = self.results.loc["loss"].min()
is_optimal = self.results.loc["loss"] == self.best_score
self.optimal_name = self.results.T[is_optimal].index[0]
return True
return False
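# Illustrative layout (an assumption for exposition, not a guaranteed on-disk
# format): `results` keeps one column per configuration key and rows "loss"/"info",
# e.g.
#
#            0.1_0.01_160    0.3_0.001_80
#   loss             0.52            0.48
#   info               ""              ""
#
# so `results.loc["loss"]` above is a Series of losses indexed by configuration
# name, and `optimal_name` is the column achieving the minimum.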
def _get_params_from_name(self, name):
"""Returns previously saved parameters given a key."""
params = self.saved_params
selected_params = dict(params[name])
if self._override_w_fixed_params:
for k in self.fixed_params:
selected_params[k] = self.fixed_params[k]
return selected_params
def get_best_params(self):
"""Returns the optimal hyperparameters thus far."""
optimal_name = self.optimal_name
return self._get_params_from_name(optimal_name)
def clear(self):
"""Clears all previous results and saved parameters."""
shutil.rmtree(self.hyperparam_folder)
os.makedirs(self.hyperparam_folder)
self.results = pd.DataFrame()
self.saved_params = pd.DataFrame()
def _check_params(self, params):
"""Checks that parameter map is properly defined."""
valid_fields = list(self.param_ranges.keys()) + list(
self.fixed_params.keys())
invalid_fields = [k for k in params if k not in valid_fields]
missing_fields = [k for k in valid_fields if k not in params]
if invalid_fields:
raise ValueError("Invalid Fields Found {} - Valid ones are {}".format(
invalid_fields, valid_fields))
if missing_fields:
raise ValueError("Missing Fields Found {} - Valid ones are {}".format(
missing_fields, valid_fields))
def _get_name(self, params):
"""Returns a unique key for the supplied set of params."""
self._check_params(params)
fields = list(params.keys())
fields.sort()
return "_".join([str(params[k]) for k in fields])
def get_next_parameters(self, ranges_to_skip=None):
"""Returns the next set of parameters to optimise.
Args:
ranges_to_skip: Explicitly defines a set of keys to skip.
"""
if ranges_to_skip is None:
ranges_to_skip = set(self.results.index)
if not isinstance(self.param_ranges, dict):
raise ValueError("Only works for random search!")
param_range_keys = list(self.param_ranges.keys())
param_range_keys.sort()
def _get_next():
"""Returns next hyperparameter set per try."""
parameters = {
k: np.random.choice(self.param_ranges[k]) for k in param_range_keys
}
# Adds fixed params
for k in self.fixed_params:
parameters[k] = self.fixed_params[k]
return parameters
for _ in range(self._max_tries):
parameters = _get_next()
name = self._get_name(parameters)
if name not in ranges_to_skip:
return parameters
raise ValueError("Exceeded max number of hyperparameter searches!!")
def update_score(self, parameters, loss, model, info=""):
"""Updates the results from last optimisation run.
Args:
parameters: Hyperparameters used in optimisation.
loss: Validation loss obtained.
model: Model to serialised if required.
info: Any ancillary information to tag on to results.
Returns:
Boolean flag indicating if the model is the best seen so far.
"""
if np.isnan(loss):
loss = np.Inf
if not os.path.isdir(self.hyperparam_folder):
os.makedirs(self.hyperparam_folder)
name = self._get_name(parameters)
is_optimal = self.results.empty or loss < self.best_score
# save the first model
if is_optimal:
# Try saving first, before updating info
if model is not None:
print("Optimal model found, updating")
model.save(self.hyperparam_folder)
self.best_score = loss
self.optimal_name = name
self.results[name] = pd.Series({"loss": loss, "info": info})
self.saved_params[name] = pd.Series(parameters)
self.results.to_csv(os.path.join(self.hyperparam_folder, "results.csv"))
self.saved_params.to_csv(os.path.join(self.hyperparam_folder, "params.csv"))
return is_optimal
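# Hedged usage sketch for the single-machine manager (illustrative only;
# `build_and_train` is an assumed helper, not part of this module):
#
#   opt_manager = HyperparamOptManager(param_ranges, fixed_params, "outputs/hyper")
#   opt_manager.load_results()
#   for _ in range(num_search_iterations):
#     params = opt_manager.get_next_parameters()
#     model, val_loss = build_and_train(params)
#     opt_manager.update_score(params, val_loss, model)
#   best_params = opt_manager.get_best_params()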
class DistributedHyperparamOptManager(HyperparamOptManager):
"""Manages distributed hyperparameter optimisation across many gpus."""
def __init__(self,
param_ranges,
fixed_params,
root_model_folder,
worker_number,
search_iterations=1000,
num_iterations_per_worker=5,
clear_serialised_params=False):
"""Instantiates optimisation manager.
This hyperparameter optimisation pre-generates #search_iterations
hyperparameter combinations and serialises them at the start. At runtime,
each worker goes through its own set of parameter ranges. The pre-generation
allows multiple workers to run in parallel on different machines without
resulting in parameter overlaps.
Args:
param_ranges: Discrete hyperparameter range for random search.
fixed_params: Fixed model parameters per experiment.
root_model_folder: Folder to store optimisation artifacts.
worker_number: Worker index defining which set of hyperparameters to
test.
search_iterations: Maximum number of random search iterations.
num_iterations_per_worker: How many iterations are handled per worker.
clear_serialised_params: Whether to regenerate hyperparameter
combinations.
"""
max_workers = int(np.ceil(search_iterations / num_iterations_per_worker))
# Sanity checks
if worker_number > max_workers:
raise ValueError(
"Worker number ({}) cannot be larger than the total number of workers!"
.format(max_workers))
if worker_number > search_iterations:
raise ValueError(
"Worker number ({}) cannot be larger than the max search iterations ({})!"
.format(worker_number, search_iterations))
print("*** Creating hyperparameter manager for worker {} ***".format(
worker_number))
hyperparam_folder = os.path.join(root_model_folder, str(worker_number))
super().__init__(
param_ranges,
fixed_params,
hyperparam_folder,
override_w_fixed_params=True)
serialised_ranges_folder = os.path.join(root_model_folder, "hyperparams")
if clear_serialised_params:
print("Regenerating hyperparameter list")
if os.path.exists(serialised_ranges_folder):
shutil.rmtree(serialised_ranges_folder)
utils.create_folder_if_not_exist(serialised_ranges_folder)
self.serialised_ranges_path = os.path.join(
serialised_ranges_folder, "ranges_{}.csv".format(search_iterations))
self.hyperparam_folder = hyperparam_folder # override
self.worker_num = worker_number
self.total_search_iterations = search_iterations
self.num_iterations_per_worker = num_iterations_per_worker
self.global_hyperparam_df = self.load_serialised_hyperparam_df()
self.worker_search_queue = self._get_worker_search_queue()
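# Worked example of the partitioning above (numbers assumed): with
# search_iterations=1000 and num_iterations_per_worker=5 there are
# ceil(1000 / 5) = 200 possible workers, and worker 3 is assigned the
# pre-generated combinations 11-15 of the shared ranges_1000.csv file.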
@property
def optimisation_completed(self):
return not self.worker_search_queue
def get_next_parameters(self):
"""Returns next dictionary of hyperparameters to optimise."""
param_name = self.worker_search_queue.pop()
params = self.global_hyperparam_df.loc[param_name, :].to_dict()
# Always override!
for k in self.fixed_params:
print("Overriding saved {}: {}".format(k, self.fixed_params[k]))
params[k] = self.fixed_params[k]
return params
def load_serialised_hyperparam_df(self):
"""Loads serialsed hyperparameter ranges from file.
Returns:
DataFrame containing hyperparameter combinations.
"""
print("Loading params for {} search iterations form {}".format(
self.total_search_iterations, self.serialised_ranges_path))
if os.path.exists(self.serialised_ranges_path):
df = pd.read_csv(self.serialised_ranges_path, index_col=0)
else:
print("Unable to load - regenerating serach ranges instead")
df = self.update_serialised_hyperparam_df()
return df
def update_serialised_hyperparam_df(self):
"""Regenerates hyperparameter combinations and saves to file.
Returns:
DataFrame containing hyperparameter combinations.
"""
search_df = self._generate_full_hyperparam_df()
print("Serialising params for {} search iterations to {}".format(
self.total_search_iterations, self.serialised_ranges_path))
search_df.to_csv(self.serialised_ranges_path)
return search_df
def _generate_full_hyperparam_df(self):
"""Generates actual hyperparameter combinations.
Returns:
DataFrame containing hyperparameter combinations.
"""
np.random.seed(131) # for reproducibility of hyperparam list
name_list = []
param_list = []
for _ in range(self.total_search_iterations):
params = super().get_next_parameters(name_list)
name = self._get_name(params)
name_list.append(name)
param_list.append(params)
full_search_df = pd.DataFrame(param_list, index=name_list)
return full_search_df
def clear(self): # reset when cleared
"""Clears results for hyperparameter manager and resets."""
super().clear()
self.worker_search_queue = self._get_worker_search_queue()
def load_results(self):
"""Load results from file and queue parameter combinations to try.
Returns:
Boolean indicating if results were successfully loaded.
"""
success = super().load_results()
if success:
self.worker_search_queue = self._get_worker_search_queue()
return success
def _get_worker_search_queue(self):
"""Generates the queue of param combinations for current worker.
Returns:
Queue of hyperparameter combinations outstanding.
"""
global_df = self.assign_worker_numbers(self.global_hyperparam_df)
worker_df = global_df[global_df["worker"] == self.worker_num]
left_overs = [s for s in worker_df.index if s not in self.results.columns]
return Deque(left_overs)
def assign_worker_numbers(self, df):
"""Updates parameter combinations with the index of the worker used.
Args:
df: DataFrame of parameter combinations.
Returns:
Updated DataFrame with worker number.
"""
output = df.copy()
n = self.total_search_iterations
batch_size = self.num_iterations_per_worker
max_worker_num = int(np.ceil(n / batch_size))
worker_idx = np.concatenate([
np.tile(i + 1, self.num_iterations_per_worker)
for i in range(max_worker_num)
])
output["worker"] = worker_idx[:len(output)]
return output
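# Small illustration of assign_worker_numbers (values assumed): with
# total_search_iterations=7 and num_iterations_per_worker=3, worker_idx above is
# [1, 1, 1, 2, 2, 2, 3, 3, 3], and truncating to len(output)=7 gives the
# "worker" column [1, 1, 1, 2, 2, 2, 3].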
|
apache-2.0
| 4,648,941,923,380,568,000
| 30.678082
| 84
| 0.683099
| false
| 3.945124
| false
| false
| false
|
googleads/google-ads-python
|
google/ads/googleads/v8/services/services/batch_job_service/pagers.py
|
1
|
3313
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, Callable, Iterable, Sequence, Tuple
from google.ads.googleads.v8.services.types import batch_job_service
class ListBatchJobResultsPager:
"""A pager for iterating through ``list_batch_job_results`` requests.
This class thinly wraps an initial
:class:`google.ads.googleads.v8.services.types.ListBatchJobResultsResponse` object, and
provides an ``__iter__`` method to iterate through its
``results`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListBatchJobResults`` requests and continue to iterate
through the ``results`` field on the
corresponding responses.
All the usual :class:`google.ads.googleads.v8.services.types.ListBatchJobResultsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., batch_job_service.ListBatchJobResultsResponse],
request: batch_job_service.ListBatchJobResultsRequest,
response: batch_job_service.ListBatchJobResultsResponse,
metadata: Sequence[Tuple[str, str]] = (),
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (:class:`google.ads.googleads.v8.services.types.ListBatchJobResultsRequest`):
The initial request object.
response (:class:`google.ads.googleads.v8.services.types.ListBatchJobResultsResponse`):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = batch_job_service.ListBatchJobResultsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[batch_job_service.ListBatchJobResultsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(
self._request, metadata=self._metadata
)
yield self._response
def __iter__(self) -> Iterable[batch_job_service.BatchJobResult]:
for page in self.pages:
yield from page.results
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
|
apache-2.0
| 1,347,235,011,634,477,000
| 39.901235
| 99
| 0.674011
| false
| 4.364954
| false
| false
| false
|
mkieszek/odoo
|
addons/purchase/purchase.py
|
1
|
104421
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import pytz
from openerp import SUPERUSER_ID, workflow
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import attrgetter
from openerp.tools.safe_eval import safe_eval as eval
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record_list, browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
from openerp.tools.float_utils import float_compare
from openerp.exceptions import UserError
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = {}
cur_obj = self.pool.get('res.currency')
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
for c in line.taxes_id.compute_all(line.price_unit, cur, line.product_qty, product=line.product_id, partner=order.partner_id)['taxes']:
val += c.get('amount', 0.0)
res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
if not value: return False
if type(ids)!=type([]):
ids=[ids]
pol_obj = self.pool.get('purchase.order.line')
for po in self.browse(cr, uid, ids, context=context):
if po.order_line:
pol_ids = pol_obj.search(cr, uid, [
('order_id', '=', po.id), '|', ('date_planned', '=', po.minimum_planned_date), ('date_planned', '<', value)
], context=context)
pol_obj.write(cr, uid, pol_ids, {'date_planned': value}, context=context)
self.invalidate_cache(cr, uid, context=context)
return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
res={}
purchase_obj=self.browse(cr, uid, ids, context=context)
for purchase in purchase_obj:
res[purchase.id] = False
if purchase.order_line:
min_date=purchase.order_line[0].date_planned
for line in purchase.order_line:
if line.state == 'cancel':
continue
if line.date_planned < min_date:
min_date=line.date_planned
res[purchase.id]=min_date
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
tot = 0.0
for invoice in purchase.invoice_ids:
if invoice.state not in ('draft','cancel'):
tot += invoice.amount_untaxed
if purchase.amount_untaxed:
res[purchase.id] = tot * 100.0 / purchase.amount_untaxed
else:
res[purchase.id] = 0.0
return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
if not ids: return {}
res = {}
for id in ids:
res[id] = [0.0,0.0]
cr.execute('''SELECT
p.order_id, sum(m.product_qty), m.state
FROM
stock_move m
LEFT JOIN
purchase_order_line p on (p.id=m.purchase_line_id)
WHERE
p.order_id IN %s GROUP BY m.state, p.order_id''',(tuple(ids),))
for oid,nbr,state in cr.fetchall():
if state=='cancel':
continue
if state=='done':
res[oid][0] += nbr or 0.0
res[oid][1] += nbr or 0.0
else:
res[oid][1] += nbr or 0.0
for r in res:
if not res[r][1]:
res[r] = 0.0
else:
res[r] = 100.0 * res[r][0] / res[r][1]
return res
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _get_purchase_order(self, cr, uid, ids, context=None):
result = {}
for order in self.browse(cr, uid, ids, context=context):
result[order.id] = True
return result.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
res[purchase.id] = all(line.invoiced for line in purchase.order_line if line.state != 'cancel')
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
def _get_picking_in(self, cr, uid, context=None):
obj_data = self.pool.get('ir.model.data')
type_obj = self.pool.get('stock.picking.type')
user_obj = self.pool.get('res.users')
company_id = user_obj.browse(cr, uid, uid, context=context).company_id.id
types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id.company_id', '=', company_id)], context=context)
if not types:
types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id', '=', False)], context=context)
if not types:
raise UserError(_("Make sure you have at least an incoming picking type defined"))
return types[0]
def _get_picking_ids(self, cr, uid, ids, field_names, args, context=None):
res = {}
for po_id in ids:
res[po_id] = []
query = """
SELECT picking_id, po.id FROM stock_picking p, stock_move m, purchase_order_line pol, purchase_order po
WHERE po.id in %s and po.id = pol.order_id and pol.id = m.purchase_line_id and m.picking_id = p.id
GROUP BY picking_id, po.id
"""
cr.execute(query, (tuple(ids), ))
picks = cr.fetchall()
for pick_id, po_id in picks:
res[po_id].append(pick_id)
return res
def _count_all(self, cr, uid, ids, field_name, arg, context=None):
return {
purchase.id: {
'shipment_count': len(purchase.picking_ids),
'invoice_count': len(purchase.invoice_ids),
}
for purchase in self.browse(cr, uid, ids, context=context)
}
STATE_SELECTION = [
('draft', 'Draft RFQ'),
('sent', 'RFQ Sent'),
('bid', 'Bid Received'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Confirmed'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
READONLY_STATES = {
'confirmed': [('readonly', True)],
'approved': [('readonly', True)],
'done': [('readonly', True)]
}
_columns = {
'name': fields.char('Order Reference', required=True, select=True, copy=False,
help="Unique number of the purchase order, "
"computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', copy=False,
help="Reference of the document that generated this purchase order "
"request; a sales order or an internal procurement request."),
'partner_ref': fields.char('Vendor Reference', states={'confirmed':[('readonly',True)],
'approved':[('readonly',True)],
'done':[('readonly',True)]},
copy=False,
help="Reference of the sales order or bid sent by your vendor. "
"It's mainly used to do the matching when you receive the "
"products as this reference is usually written on the "
"delivery order sent by your vendor."),
'date_order':fields.datetime('Order Date', required=True, states={'confirmed':[('readonly',True)],
'approved':[('readonly',True)]},
select=True, help="Depicts the date where the Quotation should be validated and converted into a Purchase Order, by default it's the creation date.",
copy=False),
'date_approve':fields.date('Date Approved', readonly=1, select=True, copy=False,
help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Vendor', required=True, states=READONLY_STATES,
change_default=True, track_visibility='always'),
'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
states=READONLY_STATES,
help="Put an address if you want to deliver directly from the vendor to the customer. " \
"Otherwise, keep empty to deliver to your own company."
),
'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states=READONLY_STATES),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states=READONLY_STATES, help="The pricelist sets the currency used for this purchase order. It also computes the vendor price for the selected products/quantities."),
'currency_id': fields.many2one('res.currency','Currency', required=True, states=READONLY_STATES),
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True,
help="The status of the purchase order or the quotation request. "
"A request for quotation is a purchase order in a 'Draft' status. "
"Then the order has to be confirmed by the user, the status switch "
"to 'Confirmed'. Then the vendor must confirm the order to change "
"the status to 'Approved'. When the purchase order is paid and "
"received, the status becomes 'Done'. If a cancel action occurs in "
"the invoice or in the receipt of goods, the status becomes "
"in exception.",
select=True, copy=False),
'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines',
states={'approved':[('readonly',True)],
'done':[('readonly',True)]},
copy=True),
'validator' : fields.many2one('res.users', 'Validated by', readonly=True, copy=False),
'notes': fields.text('Terms and Conditions'),
'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id',
'invoice_id', 'Invoices', copy=False,
help="Invoices generated for a purchase order"),
'picking_ids': fields.function(_get_picking_ids, method=True, type='one2many', relation='stock.picking', string='Picking List', help="This is the list of receipts that have been generated for this purchase order."),
'shipped':fields.boolean('Received', readonly=True, select=True, copy=False,
help="It indicates that a picking has been done"),
'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', copy=False,
help="It indicates that an invoice has been validated"),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)],'bid':[('readonly',False)]},
help="Based on Purchase Order lines: place individual lines in 'Invoice Control / On Purchase Order lines' from where you can selectively create an invoice.\n" \
"Based on generated invoice: create a draft invoice you can validate later.\n" \
"Based on incoming shipments: let you create an invoice when receipts are validated."
),
'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='datetime', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
store = {
'purchase.order.line': (_get_order, ['date_planned'], 10),
'purchase.order': (_get_purchase_order, ['order_line'], 10),
}
),
'amount_untaxed': fields.function(_amount_all, digits=0, string='Untaxed Amount',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The amount without tax", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits=0, string='Taxes',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The tax amount"),
'amount_total': fields.function(_amount_all, digits=0, string='Total',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The total amount"),
'fiscal_position_id': fields.many2one('account.fiscal.position', oldname='fiscal_position', string='Fiscal Position'),
'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
'incoterm_id': fields.many2one('stock.incoterms', 'Incoterm', help="International Commercial Terms are a series of predefined commercial terms used in international transactions."),
'product_id': fields.related('order_line', 'product_id', type='many2one', relation='product.product', string='Product'),
'create_uid': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company', 'Company', required=True, select=1, states={'confirmed': [('readonly', True)], 'approved': [('readonly', True)]}),
'journal_id': fields.many2one('account.journal', 'Journal'),
'bid_date': fields.date('Bid Received On', readonly=True, help="Date on which the bid was received"),
'bid_validity': fields.date('Bid Valid Until', help="Date on which the bid expired"),
'picking_type_id': fields.many2one('stock.picking.type', 'Deliver To', help="This will determine picking type of incoming shipment", required=True,
states={'confirmed': [('readonly', True)], 'approved': [('readonly', True)], 'done': [('readonly', True)]}),
'related_location_id': fields.related('picking_type_id', 'default_location_dest_id', type='many2one', relation='stock.location', string="Related location", store=True),
'related_usage': fields.related('location_id', 'usage', type='char'),
'shipment_count': fields.function(_count_all, type='integer', string='Incoming Shipments', multi=True),
'invoice_count': fields.function(_count_all, type='integer', string='Invoices', multi=True),
'group_id': fields.many2one('procurement.group', string="Procurement Group"),
}
_defaults = {
'date_order': fields.datetime.now,
'state': 'draft',
'name': lambda obj, cr, uid, context: '/',
'shipped': 0,
'invoice_method': 'order',
'invoiced': 0,
'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
'journal_id': _get_journal,
'currency_id': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id,
'picking_type_id': _get_picking_in,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = 'date_order desc, id desc'
def create(self, cr, uid, vals, context=None):
if vals.get('name', '/') == '/':
vals['name'] = self.pool.get('ir.sequence').next_by_code(cr, uid, 'purchase.order') or '/'
context = dict(context or {}, mail_create_nolog=True)
order = super(purchase_order, self).create(cr, uid, vals, context=context)
self.message_post(cr, uid, [order], body=_("RFQ created"), context=context)
return order
def unlink(self, cr, uid, ids, context=None):
purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in purchase_orders:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise UserError(_('In order to delete a purchase order, you must cancel it first.'))
# automatically sending subflow.delete upon deletion
self.signal_workflow(cr, uid, unlink_ids, 'purchase_cancel')
return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
def _track_subtype(self, cr, uid, ids, init_values, context=None):
record = self.browse(cr, uid, ids[0], context=context)
if 'state' in init_values and record.state == 'approved':
return 'purchase.mt_rfq_approved'
elif 'state' in init_values and record.state == 'confirmed':
return 'purchase.mt_rfq_confirmed'
elif 'state' in init_values and record.state == 'done':
return 'purchase.mt_rfq_done'
return super(purchase_order, self)._track_subtype(cr, uid, ids, init_values, context=context)
def set_order_line_status(self, cr, uid, ids, status, context=None):
line = self.pool.get('purchase.order.line')
order_line_ids = []
proc_obj = self.pool.get('procurement.order')
for order in self.browse(cr, uid, ids, context=context):
if status in ('draft', 'cancel'):
order_line_ids += [po_line.id for po_line in order.order_line]
else: # Do not change the status of already cancelled lines
order_line_ids += [po_line.id for po_line in order.order_line if po_line.state != 'cancel']
if order_line_ids:
line.write(cr, uid, order_line_ids, {'state': status}, context=context)
return True
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
if not pricelist_id:
return {}
return {'value': {'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id}}
#Destination address is used when dropshipping
def onchange_dest_address_id(self, cr, uid, ids, address_id, context=None):
if not address_id:
return {}
address = self.pool.get('res.partner')
values = {}
supplier = address.browse(cr, uid, address_id, context=context)
if supplier:
location_id = supplier.property_stock_customer.id
values.update({'location_id': location_id})
return {'value':values}
def onchange_picking_type_id(self, cr, uid, ids, picking_type_id, context=None):
value = {}
if picking_type_id:
picktype = self.pool.get("stock.picking.type").browse(cr, uid, picking_type_id, context=context)
if picktype.default_location_dest_id:
value.update({'location_id': picktype.default_location_dest_id.id, 'related_usage': picktype.default_location_dest_id.usage})
value.update({'related_location_id': picktype.default_location_dest_id.id})
return {'value': value}
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
partner = self.pool.get('res.partner')
if not partner_id:
return {'value': {
'fiscal_position_id': False,
'payment_term_id': False,
}}
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
fp = self.pool['account.fiscal.position'].get_fiscal_position(cr, uid, company_id, partner_id, context=context)
supplier = partner.browse(cr, uid, partner_id, context=context)
return {'value': {
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'fiscal_position_id': fp or supplier.property_account_position_id and supplier.property_account_position_id.id or False,
'payment_term_id': supplier.property_supplier_payment_term_id.id or False,
}}
def invoice_open(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
action_id = mod_obj.xmlid_to_res_id(cr, uid, 'account.action_invoice_tree2')
result = act_obj.read(cr, uid, action_id, context=context)
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
inv_ids += [invoice.id for invoice in po.invoice_ids]
if not inv_ids:
raise UserError(_('Please create Invoices.'))
if len(inv_ids) > 1:
result['domain'] = [('id', 'in', inv_ids)]
else:
res = mod_obj.xmlid_to_res_id(cr, uid, 'account.invoice_supplier_form')
result['views'] = [(res, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def view_invoice(self, cr, uid, ids, context=None):
'''
This function returns an action that displays the existing invoices of the given purchase order ids. It can either be in a list or in a form view, if there is only one invoice to show.
'''
context = dict(context or {})
mod_obj = self.pool.get('ir.model.data')
wizard_obj = self.pool.get('purchase.order.line_invoice')
#compute the number of invoices to display
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
if po.invoice_method == 'manual':
if not po.invoice_ids:
context.update({'active_ids' : [line.id for line in po.order_line if line.state != 'cancel']})
wizard_obj.makeInvoices(cr, uid, [], context=context)
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
res_id = res and res[1] or False
return {
'name': _('Vendor Bills'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window',
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
def view_picking(self, cr, uid, ids, context=None):
'''
This function returns an action that displays the existing picking orders of the given purchase order ids.
'''
if context is None:
context = {}
mod_obj = self.pool.get('ir.model.data')
dummy, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree'))
action = self.pool.get('ir.actions.act_window').read(cr, uid, action_id, context=context)
pick_ids = []
for po in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in po.picking_ids]
#override the context to get rid of the default filtering on picking type
action['context'] = {}
#choose the view_mode accordingly
if len(pick_ids) > 1:
action['domain'] = "[('id','in',[" + ','.join(map(str, pick_ids)) + "])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_form')
action['views'] = [(res and res[1] or False, 'form')]
action['res_id'] = pick_ids and pick_ids[0] or False
return action
def wkf_approve_order(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
return True
def wkf_bid_received(self, cr, uid, ids, context=None):
bid_date = fields.date.context_today(self, cr, uid, context=context)
self.message_post(cr, uid, ids, body=_("Bid received on %s") % (bid_date), context=context)
return self.write(cr, uid, ids, {'state':'bid', 'bid_date': bid_date})
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
if not context:
context= {}
ir_model_data = self.pool.get('ir.model.data')
try:
if context.get('send_rfq', False):
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
else:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase_done')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'name': _('Compose Email'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def print_quotation(self, cr, uid, ids, context=None):
'''
This function prints the request for quotation and marks it as sent, so that we can more easily see the next step of the workflow
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
self.signal_workflow(cr, uid, ids, 'send_rfq')
return self.pool['report'].get_action(cr, uid, ids, 'purchase.report_purchasequotation', context=context)
def wkf_confirm_order(self, cr, uid, ids, context=None):
todo = []
for po in self.browse(cr, uid, ids, context=context):
if not any(line.state != 'cancel' for line in po.order_line):
raise UserError(_('You cannot confirm a purchase order without any purchase order line.'))
if po.invoice_method == 'picking' and not any([l.product_id and l.product_id.type in ('product', 'consu') and l.state != 'cancel' for l in po.order_line]):
raise osv.except_osv(
_('Error!'),
_("You cannot confirm a purchase order with Invoice Control Method 'Based on incoming shipments' that doesn't contain any stockable item."))
for line in po.order_line:
if line.state=='draft':
todo.append(line.id)
self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
for id in ids:
self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid})
return True
def _choose_account_from_po_line(self, cr, uid, po_line, context=None):
fiscal_obj = self.pool.get('account.fiscal.position')
property_obj = self.pool.get('ir.property')
if po_line.product_id:
acc_id = po_line.product_id.property_account_expense_id.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_account_expense_categ_id.id
if not acc_id:
raise UserError(_('Define an expense account for this product: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
else:
acc_id = property_obj.get(cr, uid, 'property_account_expense_categ_id', 'product.category', context=context).id
fpos = po_line.order_id.fiscal_position_id or False
#For anglo-saxon accounting
account_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)
if po_line.company_id.anglo_saxon_accounting and po_line.product_id and not po_line.product_id.type == 'service':
acc_id = po_line.product_id.property_stock_account_input and po_line.product_id.property_stock_account_input.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_stock_account_input_categ_id and po_line.product_id.categ_id.property_stock_account_input_categ_id.id
if acc_id:
fpos = po_line.order_id.fiscal_position_id or False
account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, acc_id)
return account_id
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_ids': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
'purchase_line_id': order_line.id,
}
def _prepare_invoice(self, cr, uid, order, line_ids, context=None):
"""Prepare the dict of values to create the new invoice for a
purchase order. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record order: purchase.order record to invoice
:param list(int) line_ids: list of invoice line IDs that must be
attached to the invoice
:return: dict of value to create() the invoice
"""
journal_ids = self.pool['account.journal'].search(
cr, uid, [('type', '=', 'purchase'),
('company_id', '=', order.company_id.id)],
limit=1)
if not journal_ids:
raise UserError(_('Define purchase journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
return {
'name': order.partner_ref or order.name,
'reference': order.partner_ref or order.name,
'account_id': order.partner_id.property_account_payable_id.id,
'type': 'in_invoice',
'partner_id': order.partner_id.id,
'currency_id': order.currency_id.id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'invoice_line_ids': [(6, 0, line_ids)],
'origin': order.name,
'fiscal_position_id': order.fiscal_position_id.id or False,
'payment_term_id': order.payment_term_id.id or False,
'company_id': order.company_id.id,
}
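# Hedged override sketch (illustrative; the inheriting class name is an assumption).
# A customization module could extend the values returned above while keeping the
# extension chain intact, e.g.
#
#   class purchase_order_custom(osv.osv):
#       _inherit = 'purchase.order'
#
#       def _prepare_invoice(self, cr, uid, order, line_ids, context=None):
#           vals = super(purchase_order_custom, self)._prepare_invoice(
#               cr, uid, order, line_ids, context=context)
#           vals['comment'] = order.notes or False
#           return vals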
def action_cancel_draft(self, cr, uid, ids, context=None):
if not len(ids):
return False
self.write(cr, uid, ids, {'state':'draft','shipped':0})
self.set_order_line_status(cr, uid, ids, 'draft', context=context)
for p_id in ids:
# Deleting the existing instance of workflow for PO
self.delete_workflow(cr, uid, [p_id]) # TODO is it necessary to interleave the calls?
self.create_workflow(cr, uid, [p_id])
return True
def wkf_po_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'done'}, context=context)
self.set_order_line_status(cr, uid, ids, 'done', context=context)
def action_invoice_create(self, cr, uid, ids, context=None):
"""Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.
:param ids: list of ids of purchase orders.
:return: ID of created invoice.
:rtype: int
"""
context = dict(context or {})
inv_obj = self.pool.get('account.invoice')
inv_line_obj = self.pool.get('account.invoice.line')
res = False
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for order in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if order.company_id.id != uid_company_id:
#if the company of the document is different than the current user company, force the company in the context
#then re-do a browse to read the property fields for the good company.
context['force_company'] = order.company_id.id
order = self.browse(cr, uid, order.id, context=context)
# generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line
inv_lines = []
for po_line in order.order_line:
if po_line.state == 'cancel':
continue
acc_id = self._choose_account_from_po_line(cr, uid, po_line, context=context)
inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
inv_lines.append(inv_line_id)
po_line.write({'invoice_lines': [(4, inv_line_id)]})
# get invoice data and create invoice
inv_data = self._prepare_invoice(cr, uid, order, inv_lines, context=context)
inv_id = inv_obj.create(cr, uid, inv_data, context=context)
# Link this new invoice to related purchase order
order.write({'invoice_ids': [(4, inv_id)]})
res = inv_id
return res
def invoice_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved'}, context=context)
return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.state == 'cancel':
continue
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
def wkf_action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
self.set_order_line_status(cr, uid, ids, 'cancel', context=context)
def action_cancel(self, cr, uid, ids, context=None):
context = context or {}
for purchase in self.browse(cr, uid, ids, context=context):
for pick in purchase.picking_ids:
if pick.state == 'done':
raise UserError(_('Unable to cancel the purchase order %s.') % (purchase.name) + _('You have already received some goods for it. '))
# Check action_cancel
self.pool.get('stock.picking').action_cancel(cr, uid, [x.id for x in purchase.picking_ids if x.state != 'cancel'], context=context)
# Check procurements not related to stock move yet
if not context.get('cancel_procurement'):
cancel_procurements = []
cancel_moves = []
exception_procurements = []
for line in purchase.order_line:
if line.procurement_ids:
cancel_procurements += [x.id for x in line.procurement_ids if x.state not in ('cancel', 'exception') and x.rule_id.propagate]
exception_procurements += [x.id for x in line.procurement_ids if x.state not in ('cancel', 'exception') and not x.rule_id.propagate]
cancel_moves += [x.move_dest_id.id for x in line.procurement_ids if x.move_dest_id and x.move_dest_id.state!='cancel' and x.rule_id.propagate]
if cancel_moves:
cancel_moves = list(set(cancel_moves))
self.pool['stock.move'].action_cancel(cr, uid, cancel_moves, context=context)
if cancel_procurements:
cancel_procurements = list(set(cancel_procurements))
self.pool['procurement.order'].write(cr, uid, cancel_procurements, {'state': 'cancel'}, context=context)
if exception_procurements:
exception_procurements = list(set(exception_procurements))
self.pool['procurement.order'].write(cr, uid, exception_procurements, {'state': 'exception'}, context=context)
for inv in purchase.invoice_ids:
if inv and inv.state not in ('cancel', 'draft'):
raise UserError(_("Unable to cancel this purchase order.") + " " + _("You must first cancel all invoices related to this purchase order."))
self.pool.get('account.invoice') \
.signal_workflow(cr, uid, map(attrgetter('id'), purchase.invoice_ids), 'invoice_cancel')
self.signal_workflow(cr, uid, ids, 'purchase_cancel')
return True
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, group_id, context=None):
''' prepare the stock move data from the PO line. This function returns a list of dictionaries ready to be used in stock.move's create()'''
product_uom = self.pool.get('product.uom')
price_unit = order_line.price_unit
if order_line.product_uom.id != order_line.product_id.uom_id.id:
price_unit *= order_line.product_uom.factor / order_line.product_id.uom_id.factor
if order.currency_id.id != order.company_id.currency_id.id:
#we don't round the price_unit, as we may want to store the standard price with more digits than allowed by the currency
price_unit = self.pool.get('res.currency').compute(cr, uid, order.currency_id.id, order.company_id.currency_id.id, price_unit, round=False, context=context)
res = []
move_template = {
'name': order_line.name or '',
'product_id': order_line.product_id.id,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': order.date_order,
'date_expected': order_line.date_planned,
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id,
'move_dest_id': False,
'state': 'draft',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': price_unit,
'picking_type_id': order.picking_type_id.id,
'group_id': group_id,
'procurement_id': False,
'origin': order.name,
'route_ids': order.picking_type_id.warehouse_id and [(6, 0, [x.id for x in order.picking_type_id.warehouse_id.route_ids])] or [],
'warehouse_id':order.picking_type_id.warehouse_id.id,
'invoice_state': order.invoice_method == 'picking' and '2binvoiced' or 'none',
}
diff_quantity = order_line.product_qty
for procurement in order_line.procurement_ids:
procurement_qty = product_uom._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, to_uom_id=order_line.product_uom.id)
tmp = move_template.copy()
tmp.update({
'product_uom_qty': min(procurement_qty, diff_quantity),
'product_uos_qty': min(procurement_qty, diff_quantity),
'move_dest_id': procurement.move_dest_id.id, #move destination is same as procurement destination
'procurement_id': procurement.id,
'invoice_state': procurement.rule_id.invoice_state or (procurement.location_id and procurement.location_id.usage == 'customer' and procurement.invoice_state=='2binvoiced' and '2binvoiced') or (order.invoice_method == 'picking' and '2binvoiced') or 'none', #dropship case takes from sale
'propagate': procurement.rule_id.propagate,
})
diff_quantity -= min(procurement_qty, diff_quantity)
res.append(tmp)
#if the order line has a bigger quantity than the procurement it was for (manually changed or minimal quantity), then
#split the future stock move in two because the route followed may be different.
if float_compare(diff_quantity, 0.0, precision_rounding=order_line.product_uom.rounding) > 0:
move_template['product_uom_qty'] = diff_quantity
move_template['product_uos_qty'] = diff_quantity
res.append(move_template)
return res
def _create_stock_moves(self, cr, uid, order, order_lines, picking_id=False, context=None):
"""Creates appropriate stock moves for given order lines, whose can optionally create a
picking if none is given or no suitable is found, then confirms the moves, makes them
available, and confirms the pickings.
If ``picking_id`` is provided, the stock moves will be added to it, otherwise a standard
incoming picking will be created to wrap the stock moves (default behavior of the stock.move)
Modules that wish to customize the procurements or partition the stock moves over
multiple stock pickings may override this method and call ``super()`` with
different subsets of ``order_lines`` and/or preset ``picking_id`` values.
:param browse_record order: purchase order to which the order lines belong
:param list(browse_record) order_lines: purchase order line records for which picking
and moves should be created.
:param int picking_id: optional ID of a stock picking to which the created stock moves
will be added. A new picking will be created if omitted.
:return: None
"""
stock_move = self.pool.get('stock.move')
todo_moves = []
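        # Reuse the PO's procurement group if it already has one; otherwise create a new group named after the PO so all of its moves stay grouped together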
if order.group_id:
new_group = order.group_id.id
else:
new_group = self.pool.get("procurement.group").create(cr, uid, {'name': order.name, 'partner_id': order.partner_id.id}, context=context)
for order_line in order_lines:
if order_line.state == 'cancel':
continue
if not order_line.product_id:
continue
if order_line.product_id.type in ('product', 'consu'):
for vals in self._prepare_order_line_move(cr, uid, order, order_line, picking_id, new_group, context=context):
move = stock_move.create(cr, uid, vals, context=context)
todo_moves.append(move)
todo_moves = stock_move.action_confirm(cr, uid, todo_moves)
stock_move.force_assign(cr, uid, todo_moves)
def test_moves_done(self, cr, uid, ids, context=None):
'''PO is done at the delivery side if all the incoming shipments are done'''
for purchase in self.browse(cr, uid, ids, context=context):
for picking in purchase.picking_ids:
if picking.state != 'done':
return False
return True
def test_moves_except(self, cr, uid, ids, context=None):
        ''' PO is in exception at the delivery side if one of the pickings is canceled
            and the other pickings are completed (done or canceled)
'''
at_least_one_canceled = False
alldoneorcancel = True
for purchase in self.browse(cr, uid, ids, context=context):
for picking in purchase.picking_ids:
if picking.state == 'cancel':
at_least_one_canceled = True
if picking.state not in ['done', 'cancel']:
alldoneorcancel = False
return at_least_one_canceled and alldoneorcancel
def move_lines_get(self, cr, uid, ids, *args):
res = []
for order in self.browse(cr, uid, ids, context={}):
for line in order.order_line:
res += [x.id for x in line.move_ids]
return res
def action_picking_create(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids):
picking_vals = {
'picking_type_id': order.picking_type_id.id,
'partner_id': order.partner_id.id,
'date': order.date_order,
'origin': order.name,
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
}
picking_id = self.pool.get('stock.picking').create(cr, uid, picking_vals, context=context)
self._create_stock_moves(cr, uid, order, order.order_line, picking_id, context=context)
return picking_id
def picking_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
# Do check on related procurements:
proc_obj = self.pool.get("procurement.order")
po_lines = []
for po in self.browse(cr, uid, ids, context=context):
po_lines += [x.id for x in po.order_line if x.state != 'cancel']
if po_lines:
procs = proc_obj.search(cr, uid, [('purchase_line_id', 'in', po_lines)], context=context)
if procs:
proc_obj.check(cr, uid, procs, context=context)
for id in ids:
self.message_post(cr, uid, id, body=_("Products received"), context=context)
return True
def do_merge(self, cr, uid, ids, context=None):
"""
        To merge similar types of purchase orders.
Orders will only be merged if:
* Purchase Orders are in draft
* Purchase Orders belong to the same partner
        * Purchase Orders have the same stock location, same pricelist, same currency
Lines will only be merged if:
* Order lines are exactly the same except for the quantity and unit
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs
@param context: A standard dictionary
@return: new purchase order id
"""
        #TOFIX: merged order lines should be unlinked
def make_key(br, fields):
list_key = []
for field in fields:
field_val = getattr(br, field)
if field in ('product_id', 'account_analytic_id'):
if not field_val:
field_val = False
if isinstance(field_val, browse_record):
field_val = field_val.id
elif isinstance(field_val, browse_null):
field_val = False
elif isinstance(field_val, browse_record_list):
field_val = ((6, 0, tuple([v.id for v in field_val])),)
list_key.append((field, field_val))
list_key.sort()
return tuple(list_key)
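        # make_key builds a hashable signature of the fields that must be identical for two orders (or two lines) to be merged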
context = dict(context or {})
# Compute what the new orders should contain
new_orders = {}
order_lines_to_move = {}
for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id', 'currency_id'))
new_order = new_orders.setdefault(order_key, ({}, []))
new_order[1].append(porder.id)
order_infos = new_order[0]
order_lines_to_move.setdefault(order_key, [])
if not order_infos:
order_infos.update({
'origin': porder.origin,
'date_order': porder.date_order,
'partner_id': porder.partner_id.id,
'dest_address_id': porder.dest_address_id.id,
'picking_type_id': porder.picking_type_id.id,
'location_id': porder.location_id.id,
'pricelist_id': porder.pricelist_id.id,
'currency_id': porder.currency_id.id,
'state': 'draft',
'order_line': {},
'notes': '%s' % (porder.notes or '',),
'fiscal_position_id': porder.fiscal_position_id and porder.fiscal_position_id.id or False,
})
else:
if porder.date_order < order_infos['date_order']:
order_infos['date_order'] = porder.date_order
if porder.notes:
order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
if porder.origin:
order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
order_lines_to_move[order_key] += [order_line.id for order_line in porder.order_line
if order_line.state != 'cancel']
allorders = []
orders_info = {}
for order_key, (order_data, old_ids) in new_orders.iteritems():
# skip merges with only one order
if len(old_ids) < 2:
allorders += (old_ids or [])
continue
# cleanup order line data
for key, value in order_data['order_line'].iteritems():
del value['uom_factor']
value.update(dict(key))
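            # (6, 0, ids) replaces the new order's line set with the order lines moved over from the merged orders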
order_data['order_line'] = [(6, 0, order_lines_to_move[order_key])]
# create the new order
context.update({'mail_create_nolog': True})
neworder_id = self.create(cr, uid, order_data)
self.message_post(cr, uid, [neworder_id], body=_("RFQ created"), context=context)
orders_info.update({neworder_id: old_ids})
allorders.append(neworder_id)
# make triggers pointing to the old orders point to the new order
for old_id in old_ids:
self.redirect_workflow(cr, uid, [(old_id, neworder_id)])
self.signal_workflow(cr, uid, [old_id], 'purchase_cancel')
return orders_info
def _set_po_lines_invoiced(self, cr, uid, ids, context=None):
for po in self.browse(cr, uid, ids, context=context):
is_invoiced = []
if po.invoice_method == 'picking':
# We determine the invoiced state of the PO line based on the invoiced state
# of the associated moves. This should cover all possible cases:
# - all moves are done and invoiced
# - a PO line is split into multiple moves (e.g. if multiple pickings): some
# pickings are done, some are in progress, some are cancelled
for po_line in po.order_line:
if (po_line.move_ids and
all(move.state in ('done', 'cancel') for move in po_line.move_ids) and
not all(move.state == 'cancel' for move in po_line.move_ids) and
all(move.invoice_state == 'invoiced' for move in po_line.move_ids if move.state == 'done')):
is_invoiced.append(po_line.id)
else:
for po_line in po.order_line:
if (po_line.invoice_lines and
all(line.invoice_id.state not in ['draft', 'cancel'] for line in po_line.invoice_lines)):
is_invoiced.append(po_line.id)
if is_invoiced:
self.pool['purchase.order.line'].write(cr, uid, is_invoiced, {'invoiced': True})
workflow.trg_write(uid, 'purchase.order', po.id, cr)
class purchase_order_line(osv.osv):
def _amount_line(self, cr, uid, ids, prop, arg, context=None):
res = {}
for line in self.browse(cr, uid, ids, context=context):
cur = line.order_id.pricelist_id.currency_id
res[line.id] = line.taxes_id.compute_all(line.price_unit, cur, line.product_qty, product=line.product_id, partner=line.order_id.partner_id)['total_excluded']
return res
def _get_uom_id(self, cr, uid, context=None):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
except Exception, ex:
return False
_columns = {
'name': fields.text('Description', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'date_planned': fields.datetime('Scheduled Date', required=True, select=True),
'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits=0),
'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')],
'Status', required=True, readonly=True, copy=False,
help=' * The \'Draft\' status is set automatically when purchase order in draft status. \
\n* The \'Confirmed\' status is set automatically as confirm when purchase order in confirm status. \
\n* The \'Done\' status is set automatically when purchase order is set as done. \
\n* The \'Cancelled\' status is set automatically when user cancel purchase order.'),
'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel',
'order_line_id', 'invoice_id', 'Invoice Lines',
readonly=True, copy=False),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'partner_id': fields.related('order_id', 'partner_id', string='Partner', readonly=True, type="many2one", relation="res.partner", store=True),
'date_order': fields.related('order_id', 'date_order', string='Order Date', readonly=True, type="datetime"),
'procurement_ids': fields.one2many('procurement.order', 'purchase_line_id', string='Associated procurements'),
}
_defaults = {
'product_uom' : _get_uom_id,
'product_qty': lambda *a: 1.0,
'state': lambda *args: 'draft',
'invoiced': lambda *a: 0,
}
_table = 'purchase_order_line'
_name = 'purchase.order.line'
_description = 'Purchase Order Line'
def unlink(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.order_id.state in ['approved', 'done'] and line.state not in ['draft', 'cancel']:
raise UserError(_('Cannot delete a purchase order line which is in state \'%s\'.') %(line.state,))
procurement_obj = self.pool.get('procurement.order')
procurement_ids_to_except = procurement_obj.search(cr, uid, [('purchase_line_id', 'in', ids)], context=context)
if procurement_ids_to_except:
for po_id in procurement_ids_to_except:
procurement_obj.message_post(cr, uid, po_id, body=_('Purchase order line deleted.'), context=context)
procurement_obj.write(cr, uid, procurement_ids_to_except, {'state': 'exception'}, context=context)
return super(purchase_order_line, self).unlink(cr, uid, ids, context=context)
def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', context=None):
"""
onchange handler of product_uom.
"""
if context is None:
context = {}
if not uom_id:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
context = dict(context, purchase_uom_check=True)
return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
name=name, price_unit=price_unit, state=state, replace=False, context=context)
def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for
PO Lines that correspond to the given product.supplierinfo,
when ordered at `date_order_str`.
:param browse_record | False supplier_info: product.supplierinfo, used to
determine delivery delay (if False, default delay = 0)
:param str date_order_str: date of order field, as a string in
DEFAULT_SERVER_DATETIME_FORMAT
:rtype: datetime
:return: desired Schedule Date for the PO line
"""
supplier_delay = int(supplier_info.delay) if supplier_info else 0
return datetime.strptime(date_order_str, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta(days=supplier_delay)
def action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
# We will group by PO first, so we do the check only once for each PO
purchase_orders = list(set([x.order_id for x in self.browse(cr, uid, ids, context=context)]))
for purchase in purchase_orders:
if all([l.state == 'cancel' for l in purchase.order_line]):
self.pool.get('purchase.order').action_cancel(cr, uid, [purchase.id], context=context)
def _check_product_uom_group(self, cr, uid, context=None):
group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
res = [user for user in group_uom.users if user.id == uid]
return len(res) and True or False
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', replace=True, context=None):
"""
onchange handler of product_id.
"""
if context is None:
context = {}
res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
if not product_id:
if not uom_id:
uom_id = self.default_get(cr, uid, ['product_uom'], context=context).get('product_uom', False)
res['value']['product_uom'] = uom_id
return res
product_product = self.pool.get('product.product')
product_uom = self.pool.get('product.uom')
res_partner = self.pool.get('res.partner')
product_pricelist = self.pool.get('product.pricelist')
account_fiscal_position = self.pool.get('account.fiscal.position')
account_tax = self.pool.get('account.tax')
# - check for the presence of partner_id and pricelist_id
#if not partner_id:
# raise UserError(_('Select a partner in purchase order to choose a product.'))
#if not pricelist_id:
# raise UserError(_('Select a price list in the purchase order form before choosing a product.'))
# - determine name and notes based on product in partner lang.
context_partner = context.copy()
if partner_id:
lang = res_partner.browse(cr, uid, partner_id).lang
context_partner.update( {'lang': lang, 'partner_id': partner_id} )
product = product_product.browse(cr, uid, product_id, context=context_partner)
if replace:
            #call name_get() with the partner in the context so the name and description can match the ones defined on the seller_ids field
dummy, name = product_product.name_get(cr, uid, product_id, context=context_partner)[0]
if product.description_purchase:
name += '\n' + product.description_purchase
res['value'].update({'name': name})
# - set a domain on product_uom
res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}
# - check that uom and product uom belong to the same category
product_uom_po_id = product.uom_po_id.id
if not uom_id:
uom_id = product_uom_po_id
if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
if context.get('purchase_uom_check') and self._check_product_uom_group(cr, uid, context=context):
res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
uom_id = product_uom_po_id
res['value'].update({'product_uom': uom_id})
# - determine product_qty and date_planned based on seller info
if not date_order:
date_order = fields.datetime.now()
supplierinfo = False
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Unit of Measure')
for supplier in product.seller_ids:
if partner_id and (supplier.name.id == partner_id):
supplierinfo = supplier
if supplierinfo.product_uom.id != uom_id:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
                if float_compare(min_qty, qty, precision_digits=precision) == 1: # If the supplier's minimal quantity is greater than the quantity entered by the user, use the minimal quantity.
if qty:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected vendor has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
qty = min_qty
dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
qty = qty or 1.0
res['value'].update({'date_planned': date_planned or dt})
if qty:
res['value'].update({'product_qty': qty})
price = price_unit
if price_unit is False or price_unit is None:
# - determine price_unit and taxes_id
if pricelist_id:
date_order_str = datetime.strptime(date_order, DEFAULT_SERVER_DATETIME_FORMAT).strftime(DEFAULT_SERVER_DATE_FORMAT)
price = product_pricelist.price_get(cr, uid, [pricelist_id],
product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order_str})[pricelist_id]
else:
price = product.standard_price
taxes = account_tax.browse(cr, uid, map(lambda x: x.id, product.supplier_taxes_id))
fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes)
res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})
return res
product_id_change = onchange_product_id
product_uom_change = onchange_product_uom
def action_confirm(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
return True
class procurement_rule(osv.osv):
_inherit = 'procurement.rule'
def _get_action(self, cr, uid, context=None):
return [('buy', _('Buy'))] + super(procurement_rule, self)._get_action(cr, uid, context=context)
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line', 'Purchase Order Line'),
'purchase_id': fields.related('purchase_line_id', 'order_id', type='many2one', relation='purchase.order', string='Purchase Order'),
}
def propagate_cancels(self, cr, uid, ids, context=None):
purchase_line_obj = self.pool.get('purchase.order.line')
lines_to_cancel = []
uom_obj = self.pool.get("product.uom")
for procurement in self.browse(cr, uid, ids, context=context):
if procurement.rule_id.action == 'buy' and procurement.purchase_line_id:
if procurement.purchase_line_id.state not in ('draft', 'cancel'):
                    raise UserError(
                        _('Cannot cancel this procurement because the related purchase order has already been confirmed. Please cancel the purchase order first.'))
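                # Shrink the PO line by the cancelled procurement's quantity; if nothing remains the line itself is cancelled and removed below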
new_qty, new_price = self._calc_new_qty_price(cr, uid, procurement, cancel=True, context=context)
if new_qty != procurement.purchase_line_id.product_qty:
purchase_line_obj.write(cr, uid, [procurement.purchase_line_id.id], {'product_qty': new_qty, 'price_unit': new_price}, context=context)
if float_compare(new_qty, 0.0, precision_rounding=procurement.product_uom.rounding) != 1:
if procurement.purchase_line_id.id not in lines_to_cancel:
lines_to_cancel += [procurement.purchase_line_id.id]
if lines_to_cancel:
purchase_line_obj.action_cancel(cr, uid, lines_to_cancel, context=context)
purchase_line_obj.unlink(cr, uid, lines_to_cancel, context=context)
return super(procurement_order, self).propagate_cancels(cr, uid, ids, context=context)
def _run(self, cr, uid, procurement, context=None):
if procurement.rule_id and procurement.rule_id.action == 'buy':
#make a purchase order for the procurement
return self.make_po(cr, uid, [procurement.id], context=context)[procurement.id]
return super(procurement_order, self)._run(cr, uid, procurement, context=context)
#TODO: Autocommit needed?
def run(self, cr, uid, ids, autocommit=False, context=None):
procs = self.browse(cr, uid, ids, context=context)
to_assign = [x for x in procs if x.state not in ('running', 'done')]
self._assign_multi(cr, uid, to_assign, context=context)
buy_ids = [x.id for x in to_assign if x.rule_id and x.rule_id.action == 'buy']
if buy_ids:
result_dict = self.make_po(cr, uid, buy_ids, context=context)
runnings = []
exceptions = []
for proc in result_dict.keys():
if result_dict[proc]:
runnings += [proc]
else:
exceptions += [proc]
if runnings:
self.write(cr, uid, runnings, {'state': 'running'}, context=context)
if exceptions:
self.write(cr, uid, exceptions, {'state': 'exception'}, context=context)
set_others = set(ids) - set(buy_ids)
return super(procurement_order, self).run(cr, uid, list(set_others), context=context)
def _check(self, cr, uid, procurement, context=None):
if procurement.purchase_line_id:
if procurement.purchase_line_id.order_id.shipped:
return True
elif procurement.move_ids:
moves = self.pool.get('stock.move').browse(cr, uid, [x.id for x in procurement.move_ids], context=context)
return all(move.state == 'done' for move in moves)
return super(procurement_order, self)._check(cr, uid, procurement, context=context)
def _check_supplier_info(self, cr, uid, ids, context=None):
''' Check the vendor info field of a product and write an error message on the procurement if needed.
Returns True if all needed information is there, False if some configuration mistake is detected.
'''
partner_obj = self.pool.get('res.partner')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
message = ''
            partner = procurement.product_id.seller_id  # main vendor of the procurement's product
if not procurement.product_id.seller_ids:
message = _('No vendor defined for this product !')
elif not partner:
message = _('No default vendor defined for this product')
elif not partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']:
message = _('No address defined for the vendor')
if message:
if procurement.message != message:
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
if user.company_id and user.company_id.partner_id:
if partner.id == user.company_id.partner_id.id:
                    raise UserError(_('The product "%s" has been defined with your company as reseller which seems to be a configuration error!') % procurement.product_id.name)
return True
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
"""Create the purchase order from the procurement, using
the provided field values, after adding the given purchase
order line in the purchase order.
        :param procurement: the procurement object generating the purchase order
        :param dict po_vals: field values for the new purchase order (the
                             ``order_line`` field will be overwritten with one
                             single line, as passed in ``line_vals``).
        :param dict line_vals: field values of the single purchase order line that
                               the purchase order will contain.
:return: id of the newly created purchase order
:rtype: int
"""
po_vals.update({'order_line': [(0,0,line_vals)]})
return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for the
Purchase Order Lines created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:rtype: datetime
:return: the desired Schedule Date for the PO lines
"""
procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
return schedule_date
def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
"""Return the datetime value to use as Order Date (``date_order``) for the
Purchase Order created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
        :param browse_record company: the company to which the new PO will belong.
:param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
:rtype: datetime
:return: the desired Order Date for the PO
"""
seller_delay = int(procurement.product_id.seller_delay)
return schedule_date - relativedelta(days=seller_delay)
def _get_product_supplier(self, cr, uid, procurement, context=None):
''' returns the main vendor of the procurement's product given as argument'''
supplierinfo = self.pool['product.supplierinfo']
company_supplier = supplierinfo.search(cr, uid,
[('product_tmpl_id', '=', procurement.product_id.product_tmpl_id.id), ('company_id', '=', procurement.company_id.id)], limit=1, context=context)
if company_supplier:
return supplierinfo.browse(cr, uid, company_supplier[0], context=context).name
return procurement.product_id.seller_id
def _get_po_line_values_from_procs(self, cr, uid, procurements, partner, schedule_date, context=None):
res = {}
if context is None:
context = {}
uom_obj = self.pool.get('product.uom')
pricelist_obj = self.pool.get('product.pricelist')
prod_obj = self.pool.get('product.product')
acc_pos_obj = self.pool.get('account.fiscal.position')
pricelist_id = partner.property_product_pricelist_purchase.id
prices_qty = []
qty = {}
for procurement in procurements:
seller_qty = procurement.product_id.seller_qty if procurement.location_id.usage != 'customer' else 0.0
uom_id = procurement.product_id.uom_po_id.id
qty[procurement.product_id.id] = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
if seller_qty:
qty[procurement.product_id.id] = max(qty[procurement.product_id.id], seller_qty)
prices_qty += [(procurement.product_id, qty[procurement.product_id.id], partner)]
prices = pricelist_obj.price_get_multi(cr, uid, [pricelist_id], prices_qty)
        # Pass the vendor's language and partner_id in the context so the PO line names match the vendor's descriptions
        new_context = context.copy()
        new_context.update({'lang': partner.lang, 'partner_id': partner.id})
        names = prod_obj.name_get(cr, uid, [x.product_id.id for x in procurements], context=new_context)
names_dict = {}
for id, name in names:
names_dict[id] = name
for procurement in procurements:
taxes_ids = procurement.product_id.supplier_taxes_id
# It is necessary to have the appropriate fiscal position to get the right tax mapping
fp = acc_pos_obj.get_fiscal_position(cr, uid, None, partner.id, context=context)
if fp:
fp = acc_pos_obj.browse(cr, uid, fp, context=context)
taxes = acc_pos_obj.map_tax(cr, uid, fp, taxes_ids)
name = names_dict[procurement.product_id.id]
if procurement.product_id.description_purchase:
name += '\n' + procurement.product_id.description_purchase
price = prices[procurement.product_id.id][pricelist_id]
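            # Convert the price from the procurement's UoM to the product's purchase UoM, which is the UoM used on the PO line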
price = uom_obj._compute_price(cr, uid, procurement.product_uom.id, price, to_uom_id=procurement.product_id.product_tmpl_id.uom_po_id.id)
values = {
'name': name,
'product_qty': qty[procurement.product_id.id],
'product_id': procurement.product_id.id,
'product_uom': procurement.product_id.uom_po_id.id,
'price_unit': price or 0.0,
'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'taxes_id': [(6, 0, taxes)],
'procurement_ids': [(4, procurement.id)]
}
res[procurement.id] = values
return res
def _calc_new_qty_price(self, cr, uid, procurement, po_line=None, cancel=False, context=None):
if not po_line:
po_line = procurement.purchase_line_id
uom_obj = self.pool.get('product.uom')
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty,
procurement.product_id.uom_po_id.id)
if cancel:
qty = -qty
# Make sure we use the minimum quantity of the partner corresponding to the PO
# This does not apply in case of dropshipping
supplierinfo_min_qty = 0.0
if po_line.order_id.location_id.usage != 'customer':
if po_line.product_id.seller_id.id == po_line.order_id.partner_id.id:
supplierinfo_min_qty = po_line.product_id.seller_qty
else:
supplierinfo_obj = self.pool.get('product.supplierinfo')
supplierinfo_ids = supplierinfo_obj.search(cr, uid, [('name', '=', po_line.order_id.partner_id.id), ('product_tmpl_id', '=', po_line.product_id.product_tmpl_id.id)])
supplierinfo_min_qty = supplierinfo_obj.browse(cr, uid, supplierinfo_ids).min_qty
if supplierinfo_min_qty == 0.0:
qty += po_line.product_qty
else:
# Recompute quantity by adding existing running procurements.
for proc in po_line.procurement_ids:
qty += uom_obj._compute_qty(cr, uid, proc.product_uom.id, proc.product_qty,
proc.product_id.uom_po_id.id) if proc.state == 'running' else 0.0
qty = max(qty, supplierinfo_min_qty) if qty > 0.0 else 0.0
price = po_line.price_unit
if qty != po_line.product_qty:
pricelist_obj = self.pool.get('product.pricelist')
pricelist_id = po_line.order_id.partner_id.property_product_pricelist_purchase.id
price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, po_line.order_id.partner_id.id, {'uom': procurement.product_uom.id})[pricelist_id]
return qty, price
def _get_grouping_dicts(self, cr, uid, ids, context=None):
"""
        Group the procurements according to the POs they should go into, so that lines going to the
        same PO can be processed at once.
        Returns two dictionaries:
        add_purchase_procs: key: id of an existing draft PO, value: procurements to add to that PO
        create_purchase_procs: key: grouping values of the PO to create, value: procurements to add
"""
po_obj = self.pool.get('purchase.order')
# Regroup POs
cr.execute("""
SELECT psi.name, p.id, pr.id, pr.picking_type_id, p.location_id, p.partner_dest_id, p.company_id, p.group_id,
pr.group_propagation_option, pr.group_id, psi.qty
FROM procurement_order AS p
LEFT JOIN procurement_rule AS pr ON pr.id = p.rule_id
LEFT JOIN procurement_group AS pg ON p.group_id = pg.id,
product_supplierinfo AS psi, product_product AS pp
WHERE
p.product_id = pp.id AND p.id in %s AND psi.product_tmpl_id = pp.product_tmpl_id
AND (psi.company_id = p.company_id or psi.company_id IS NULL)
ORDER BY psi.sequence,
psi.name, p.rule_id, p.location_id, p.company_id, p.partner_dest_id, p.group_id
""", (tuple(ids), ))
res = cr.fetchall()
old = False
# A giant dict for grouping lines, ... to do at once
create_purchase_procs = {} # Lines to add to a newly to create po
add_purchase_procs = {} # Lines to add/adjust in an existing po
proc_seller = {} # To check we only process one po
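        # Rows are ordered by supplier priority (psi.sequence); only the first supplier returned for each procurement is taken into account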
for partner, proc, rule, pick_type, location, partner_dest, company, group, group_propagation, fixed_group, qty in res:
if not proc_seller.get(proc):
proc_seller[proc] = partner
new = partner, rule, pick_type, location, company, group, group_propagation, fixed_group
if new != old:
old = new
dom = [
('partner_id', '=', partner), ('state', '=', 'draft'), ('picking_type_id', '=', pick_type),
('location_id', '=', location), ('company_id', '=', company), ('dest_address_id', '=', partner_dest)]
if group_propagation == 'propagate':
dom += [('group_id', '=', group)]
elif group_propagation == 'fixed':
dom += [('group_id', '=', fixed_group)]
available_draft_po_ids = po_obj.search(cr, uid, dom, context=context)
available_draft_po = available_draft_po_ids and available_draft_po_ids[0] or False
# Add to dictionary
if available_draft_po:
if add_purchase_procs.get(available_draft_po):
add_purchase_procs[available_draft_po] += [proc]
else:
add_purchase_procs[available_draft_po] = [proc]
else:
if create_purchase_procs.get(new):
create_purchase_procs[new] += [proc]
else:
create_purchase_procs[new] = [proc]
return add_purchase_procs, create_purchase_procs
def make_po(self, cr, uid, ids, context=None):
res = {}
po_obj = self.pool.get('purchase.order')
po_line_obj = self.pool.get('purchase.order.line')
seq_obj = self.pool.get('ir.sequence')
uom_obj = self.pool.get('product.uom')
acc_pos_obj = self.pool.get('account.fiscal.position')
add_purchase_procs, create_purchase_procs = self._get_grouping_dicts(cr, uid, ids, context=context)
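        # Two passes: first extend existing draft POs with matching procurements, then create new POs for the remaining ones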
procs_done = []
# Let us check existing purchase orders and add/adjust lines on them
for add_purchase in add_purchase_procs.keys():
procs_done += add_purchase_procs[add_purchase]
po = po_obj.browse(cr, uid, add_purchase, context=context)
lines_to_update = {}
line_values = []
procurements = self.browse(cr, uid, add_purchase_procs[add_purchase], context=context)
po_line_ids = po_line_obj.search(cr, uid, [('order_id', '=', add_purchase), ('product_id', 'in', [x.product_id.id for x in procurements])], context=context)
po_lines = po_line_obj.browse(cr, uid, po_line_ids, context=context)
po_prod_dict = {}
for pol in po_lines:
po_prod_dict[pol.product_id.id] = pol
procs_to_create = []
#Check which procurements need a new line and which need to be added to an existing one
for proc in procurements:
if po_prod_dict.get(proc.product_id.id):
po_line = po_prod_dict[proc.product_id.id]
# FIXME: compute quantity using `_calc_new_qty_price` method.
# new_qty, new_price = self._calc_new_qty_price(cr, uid, proc, po_line=po_line, context=context)
uom_id = po_line.product_uom # Convert to UoM of existing line
qty = uom_obj._compute_qty_obj(cr, uid, proc.product_uom, proc.product_qty, uom_id)
if lines_to_update.get(po_line):
lines_to_update[po_line] += [(proc, qty)]
else:
lines_to_update[po_line] = [(proc, qty)]
else:
procs_to_create.append(proc)
procs = []
# FIXME: these are not real tracking values, it should be fixed if tracking values for one2many
# are managed
def format_message(message_description, tracked_values):
message = ''
if message_description:
message = '<span>%s</span>' % message_description
for name, values in tracked_values.iteritems():
message += '<div> • <b>%s</b>: ' % name
message += '%s</div>' % values
return message
            # Update the quantities of the lines that need it
for line in lines_to_update.keys():
tot_qty = 0
for proc, qty in lines_to_update[line]:
tot_qty += qty
self.message_post(cr, uid, proc.id, body=_("Quantity added in existing Purchase Order Line"), context=context)
msg = format_message(_('Quantity added in existing Purchase Order Line'), {'Product': proc.product_id.name, 'Quantity': proc.product_qty, 'Procurement': proc.origin})
po_obj.message_post(cr, uid, [add_purchase], body=msg, context=context)
line_values += [(1, line.id, {'product_qty': line.product_qty + tot_qty, 'procurement_ids': [(4, x[0].id) for x in lines_to_update[line]]})]
# Create lines for which no line exists yet
if procs_to_create:
partner = po.partner_id
schedule_date = datetime.strptime(po.minimum_planned_date, DEFAULT_SERVER_DATETIME_FORMAT)
value_lines = self._get_po_line_values_from_procs(cr, uid, procs_to_create, partner, schedule_date, context=context)
line_values += [(0, 0, value_lines[x]) for x in value_lines.keys()]
for proc in procs_to_create:
self.message_post(cr, uid, [proc.id], body=_("Purchase line created and linked to an existing Purchase Order"), context=context)
msg = format_message(_('Purchase order line added'), {'Product': proc.product_id.name, 'Quantity': proc.product_qty, 'Procurement': proc.origin})
po_obj.message_post(cr, uid, [add_purchase], body=msg, context=context)
po_obj.write(cr, uid, [add_purchase], {'order_line': line_values},context=context)
# Create new purchase orders
partner_obj = self.pool.get("res.partner")
new_pos = []
for create_purchase in create_purchase_procs.keys():
procs_done += create_purchase_procs[create_purchase]
line_values = []
procurements = self.browse(cr, uid, create_purchase_procs[create_purchase], context=context)
partner = partner_obj.browse(cr, uid, create_purchase[0], context=context)
#Create purchase order itself:
procurement = procurements[0]
schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, procurement.company_id, context=context)
purchase_date = self._get_purchase_order_date(cr, uid, procurement, procurement.company_id, schedule_date, context=context)
value_lines = self._get_po_line_values_from_procs(cr, uid, procurements, partner, schedule_date, context=context)
line_values += [(0, 0, value_lines[x]) for x in value_lines.keys()]
name = seq_obj.next_by_code(cr, uid, 'purchase.order') or _('PO: %s') % procurement.name
gpo = procurement.rule_id.group_propagation_option
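            # The procurement group put on the new PO depends on the rule's propagation option: a fixed group, the procurement's own group, or none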
group = (gpo == 'fixed' and procurement.rule_id.group_id.id) or (gpo == 'propagate' and procurement.group_id.id) or False
fp = acc_pos_obj.get_fiscal_position(cr, uid, None, partner.id, context=context)
po_vals = {
'name': name,
'origin': procurement.origin,
'partner_id': create_purchase[0],
'location_id': procurement.location_id.id,
'picking_type_id': procurement.rule_id.picking_type_id.id,
'pricelist_id': partner.property_product_pricelist_purchase.id,
'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': procurement.company_id.id,
'fiscal_position_id': fp,
'payment_term_id': partner.property_supplier_payment_term_id.id,
'dest_address_id': procurement.partner_dest_id.id,
'group_id': group,
'order_line': line_values,
}
new_po = po_obj.create(cr, uid, po_vals, context=context)
new_pos.append(new_po)
for proc in create_purchase_procs[create_purchase]:
self.message_post(cr, uid, proc, body=_("Draft Purchase Order created"), context=context)
other_proc_ids = list(set(ids) - set(procs_done))
res = dict.fromkeys(ids, True)
if other_proc_ids:
other_procs = self.browse(cr, uid, other_proc_ids, context=context)
for procurement in other_procs:
res[procurement.id] = False
self.message_post(cr, uid, [procurement.id], _('There is no vendor associated to product %s') % (procurement.product_id.name))
return res
class product_template(osv.Model):
_name = 'product.template'
_inherit = 'product.template'
def _get_buy_route(self, cr, uid, context=None):
buy_route = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'purchase.route_warehouse0_buy')
if buy_route:
return [buy_route]
return []
def _purchase_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0)
for template in self.browse(cr, uid, ids, context=context):
res[template.id] = sum([p.purchase_count for p in template.product_variant_ids])
return res
_columns = {
'property_account_creditor_price_difference': fields.property(
type='many2one',
relation='account.account',
string="Price Difference Account",
help="This account will be used to value price difference between purchase price and cost price."),
'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
'purchase_count': fields.function(_purchase_count, string='# Purchases', type='integer'),
}
_defaults = {
'purchase_ok': 1,
'route_ids': _get_buy_route,
}
def action_view_purchases(self, cr, uid, ids, context=None):
products = self._get_products(cr, uid, ids, context=context)
result = self._get_act_window_dict(cr, uid, 'purchase.action_purchase_line_product_tree', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
return result
class product_product(osv.Model):
_name = 'product.product'
_inherit = 'product.product'
def _purchase_count(self, cr, uid, ids, field_name, arg, context=None):
r = dict.fromkeys(ids, 0)
domain = [
('state', 'in', ['confirmed', 'approved', 'except_picking', 'except_invoice', 'done']),
('product_id', 'in', ids),
]
for group in self.pool['purchase.report'].read_group(cr, uid, domain, ['product_id', 'quantity'], ['product_id'], context=context):
r[group['product_id'][0]] = group['quantity']
return r
def action_view_purchases(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
result = self.pool['product.template']._get_act_window_dict(cr, uid, 'purchase.action_purchase_line_product_tree', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, ids)) + "])]"
return result
_columns = {
'purchase_count': fields.function(_purchase_count, string='# Purchases', type='integer'),
}
class product_category(osv.Model):
_inherit = "product.category"
_columns = {
'property_account_creditor_price_difference_categ': fields.property(
type='many2one',
relation='account.account',
string="Price Difference Account",
help="This account will be used to value price difference between purchase price and cost price."),
}
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, auto_commit=False, context=None):
context = context or {}
if context.get('default_model') == 'purchase.order' and context.get('default_res_id'):
context = dict(context, mail_post_autofollow=True)
self.pool.get('purchase.order').signal_workflow(cr, uid, [context['default_res_id']], 'send_rfq')
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
""" Override account_invoice to add Chatter messages on the related purchase
orders, logging the invoice receipt or payment. """
_inherit = 'account.invoice'
_columns = {
'purchase_ids': fields.many2many('purchase.order', 'purchase_invoice_rel', 'invoice_id',
'purchase_id', 'Purchases', copy=False,
help="Purchases linked to this invoice")
}
def invoice_validate(self, cr, uid, ids, context=None):
res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
for po_id in po_ids:
purchase_order_obj.message_post(cr, user_id, po_id, body=_("Invoice received"), context=context)
purchase_order_obj._set_po_lines_invoiced(cr, user_id, [po_id], context=context)
return res
def confirm_paid(self, cr, uid, ids, context=None):
res = super(account_invoice, self).confirm_paid(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
for po_id in po_ids:
purchase_order_obj.message_post(cr, user_id, po_id, body=_("Invoice paid"), context=context)
return res
class account_invoice_line(osv.Model):
""" Override account_invoice_line to add the link to the purchase order line it is related to"""
_inherit = 'account.invoice.line'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line',
'Purchase Order Line', ondelete='set null', select=True,
readonly=True),
}
def move_line_get(self, cr, uid, invoice_id, context=None):
res = super(account_invoice_line,self).move_line_get(cr, uid, invoice_id, context=context)
        inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
        if inv.company_id.anglo_saxon_accounting:
            if inv.type in ('in_invoice','in_refund'):
for i_line in inv.invoice_line_ids:
res.extend(self._anglo_saxon_purchase_move_lines(cr, uid, i_line, res, context=context))
return res
def _anglo_saxon_purchase_move_lines(self, cr, uid, i_line, res, context=None):
"""Return the additional move lines for purchase invoices and refunds.
i_line: An account.invoice.line object.
res: The move line entries produced so far by the parent move_line_get.
"""
inv = i_line.invoice_id
company_currency = inv.company_id.currency_id.id
if i_line.product_id and i_line.product_id.valuation == 'real_time':
if i_line.product_id.type != 'service':
# get the price difference account at the product
acc = i_line.product_id.property_account_creditor_price_difference and i_line.product_id.property_account_creditor_price_difference.id
if not acc:
# if not found on the product get the price difference account at the category
acc = i_line.product_id.categ_id.property_account_creditor_price_difference_categ and i_line.product_id.categ_id.property_account_creditor_price_difference_categ.id
a = None
# oa will be the stock input account
# first check the product, if empty check the category
oa = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id
if not oa:
oa = i_line.product_id.categ_id.property_stock_account_input_categ_id and i_line.product_id.categ_id.property_stock_account_input_categ_id.id
if oa:
# get the fiscal position
fpos = i_line.invoice_id.fiscal_position_id or False
a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
diff_res = []
account_prec = inv.company_id.currency_id.decimal_places
# calculate and write down the possible price difference between invoice price and product price
for line in res:
if line.get('invl_id', 0) == i_line.id and a == line['account_id']:
uom = i_line.product_id.uos_id or i_line.product_id.uom_id
valuation_price_unit = self.pool.get('product.uom')._compute_price(cr, uid, uom.id, i_line.product_id.standard_price, i_line.uos_id.id)
if i_line.product_id.cost_method != 'standard' and i_line.purchase_line_id:
                    #for average/fifo/lifo costing methods, fetch the real cost price from incoming moves
stock_move_obj = self.pool.get('stock.move')
valuation_stock_move = stock_move_obj.search(cr, uid, [('purchase_line_id', '=', i_line.purchase_line_id.id)], limit=1, context=context)
if valuation_stock_move:
valuation_price_unit = stock_move_obj.browse(cr, uid, valuation_stock_move[0], context=context).price_unit
if inv.currency_id.id != company_currency:
valuation_price_unit = self.pool.get('res.currency').compute(cr, uid, company_currency, inv.currency_id.id, valuation_price_unit, context={'date': inv.date_invoice})
if valuation_price_unit != i_line.price_unit and line['price_unit'] == i_line.price_unit and acc:
# price with discount and without tax included
price_unit = self.pool['account.tax'].compute_all(cr, uid, line['taxes'], i_line.price_unit * (1-(i_line.discount or 0.0)/100.0),
inv.currency_id.id, line['quantity'])['total_excluded']
price_line = round(valuation_price_unit * line['quantity'], account_prec)
price_diff = round(price_unit - price_line, account_prec)
line.update({'price': price_line})
diff_res.append({
'type': 'src',
'name': i_line.name[:64],
'price_unit': round(price_diff / line['quantity'], account_prec),
'quantity': line['quantity'],
'price': price_diff,
'account_id': acc,
'product_id': line['product_id'],
'uos_id': line['uos_id'],
'account_analytic_id': line['account_analytic_id'],
'taxes': line.get('taxes', []),
})
return diff_res
return []
class account_invoice_line(osv.Model):
""" Override account_invoice_line to add the link to the purchase order line it is related to"""
_inherit = 'account.invoice.line'
_columns = {
'purchase_line_ids': fields.many2many('purchase.order.line', 'purchase_order_line_invoice_rel', 'invoice_id','order_line_id',
'Purchase Order Lines', readonly=True, copy=False)
}
|
agpl-3.0
| 917,454,773,786,240,900
| 54.662047
| 302
| 0.586893
| false
| 3.937295
| false
| false
| false
|
orlenko/plei
|
pleiapp/migrations/0002_auto__add_tagline.py
|
1
|
19744
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Tagline'
db.create_table(u'pleiapp_tagline', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('text', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'pleiapp', ['Tagline'])
# Adding M2M table for field related_resources on 'Dictionary'
m2m_table_name = db.shorten_name(u'pleiapp_dictionary_related_resources')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('dictionary', models.ForeignKey(orm[u'pleiapp.dictionary'], null=False)),
('resource', models.ForeignKey(orm[u'pleiapp.resource'], null=False))
))
db.create_unique(m2m_table_name, ['dictionary_id', 'resource_id'])
# Adding M2M table for field related_faqs on 'Dictionary'
m2m_table_name = db.shorten_name(u'pleiapp_dictionary_related_faqs')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('dictionary', models.ForeignKey(orm[u'pleiapp.dictionary'], null=False)),
('faq', models.ForeignKey(orm[u'pleiapp.faq'], null=False))
))
db.create_unique(m2m_table_name, ['dictionary_id', 'faq_id'])
# Adding M2M table for field related_resources on 'Faq'
m2m_table_name = db.shorten_name(u'pleiapp_faq_related_resources')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('faq', models.ForeignKey(orm[u'pleiapp.faq'], null=False)),
('resource', models.ForeignKey(orm[u'pleiapp.resource'], null=False))
))
db.create_unique(m2m_table_name, ['faq_id', 'resource_id'])
def backwards(self, orm):
# Deleting model 'Tagline'
db.delete_table(u'pleiapp_tagline')
# Removing M2M table for field related_resources on 'Dictionary'
db.delete_table(db.shorten_name(u'pleiapp_dictionary_related_resources'))
# Removing M2M table for field related_faqs on 'Dictionary'
db.delete_table(db.shorten_name(u'pleiapp_dictionary_related_faqs'))
# Removing M2M table for field related_resources on 'Faq'
db.delete_table(db.shorten_name(u'pleiapp_faq_related_resources'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': u"orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
u'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'pleiapp.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'visible': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'})
},
u'pleiapp.dictionary': {
'Meta': {'ordering': "('title',)", 'object_name': 'Dictionary'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'dicts'", 'blank': 'True', 'to': u"orm['pleiapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'related_dictionary': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_dictionary_rel_+'", 'blank': 'True', 'to': u"orm['pleiapp.Dictionary']"}),
'related_faqs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Faq']", 'symmetrical': 'False', 'blank': 'True'}),
'related_resources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Resource']", 'symmetrical': 'False', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'dicts'", 'blank': 'True', 'to': u"orm['pleiapp.Topic']"}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'dicts'", 'blank': 'True', 'to': u"orm['pleiapp.Type']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dictionarys'", 'to': u"orm['auth.User']"})
},
u'pleiapp.faq': {
'Meta': {'ordering': "('title',)", 'object_name': 'Faq'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'faqs'", 'blank': 'True', 'to': u"orm['pleiapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'related_dictionary': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Dictionary']", 'symmetrical': 'False', 'blank': 'True'}),
'related_faqs': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_faqs_rel_+'", 'blank': 'True', 'to': u"orm['pleiapp.Faq']"}),
'related_resources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Resource']", 'symmetrical': 'False', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'faqs'", 'blank': 'True', 'to': u"orm['pleiapp.Topic']"}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'faqs'", 'blank': 'True', 'to': u"orm['pleiapp.Type']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'faqs'", 'to': u"orm['auth.User']"})
},
u'pleiapp.frontpageitem': {
'Meta': {'object_name': 'FrontPageItem'},
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'pleiapp.resource': {
'Meta': {'ordering': "('title',)", 'object_name': 'Resource'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'resources'", 'blank': 'True', 'to': u"orm['pleiapp.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': u"orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'related_dictionary': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Dictionary']", 'symmetrical': 'False', 'blank': 'True'}),
'related_faqs': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pleiapp.Faq']", 'symmetrical': 'False', 'blank': 'True'}),
'related_resources': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_resources_rel_+'", 'blank': 'True', 'to': u"orm['pleiapp.Resource']"}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'resources'", 'blank': 'True', 'to': u"orm['pleiapp.Topic']"}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'resources'", 'blank': 'True', 'to': u"orm['pleiapp.Type']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'resources'", 'to': u"orm['auth.User']"})
},
u'pleiapp.tagline': {
'Meta': {'object_name': 'Tagline'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'pleiapp.topic': {
'Meta': {'ordering': "('title',)", 'object_name': 'Topic'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'visible': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'})
},
u'pleiapp.type': {
'Meta': {'ordering': "('title',)", 'object_name': 'Type'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'visible': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['pleiapp']
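# Note on the frozen-ORM entries above: each field is a South triple of
# ('dotted.path.to.FieldClass', [positional args], {keyword args}); for example
# ('django.db.models.fields.CharField', [], {'max_length': '500'}) freezes a
# CharField(max_length=500) on the model.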
|
bsd-2-clause
| -8,362,666,055,092,364,000
| 80.929461
| 191
| 0.562804
| false
| 3.557477
| false
| false
| false
|
leonth/private-configs
|
sublime-text-3/Packages/SublimePythonIDE/server/server.py
|
1
|
13177
|
import os
import sys
import time
import logging
import tempfile
import threading
if sys.version_info[0] == 2:
sys.path.insert(
0, os.path.join(os.path.dirname(__file__), "..", "lib", "python2"))
from SimpleXMLRPCServer import SimpleXMLRPCServer
from xmlrpclib import Binary
else:
sys.path.insert(
0, os.path.join(os.path.dirname(__file__), "..", "lib", "python3"))
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.client import Binary
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "lib"))
from linter import do_linting
from rope.base import libutils
from rope.base.project import Project
from rope.base.exceptions import ModuleSyntaxError
from rope.contrib.codeassist import (
code_assist, sorted_proposals, get_doc, get_definition_location
)
# global state of the server process
last_heartbeat = None
# constants
HEARTBEAT_TIMEOUT = 19
NO_ROOT_PATH = -1
class RopeProjectMixin(object):
"""
Creates and manages Rope projects"""
def __init__(self):
self.projects = {}
self.buffer_tmpfile_map = {}
self.tempfiles = []
def __del__(self):
        '''Clean up temporary files when the server is deallocated. Although
        Python destructors are not guaranteed to run, it is still OK to do
        cleanup here, as a tempfile surviving the server in TEMPDIR
        is not too big of a problem.'''
for tfn in self.tempfiles:
os.unlink(tfn)
def project_for(self, project_path, file_path, source=""):
# scratch buffer case: create temp file and proj for buffer and cache it
if file_path.startswith("BUFFER:"):
if file_path in self.projects:
project = self.projects[file_path]
file_path = self.buffer_tmpfile_map[file_path]
else:
original_file_path = file_path
file_path = self._create_temp_file(source)
project = self._create_single_file_project(file_path)
self.projects[original_file_path] = project
self.buffer_tmpfile_map[original_file_path] = file_path
# single file case (or scratch buffer with client not sending buffer_id)
# create temp file and proj, and buffer if file_name given
elif project_path == NO_ROOT_PATH:
if file_path in self.projects:
project = self.projects[file_path]
else:
if not file_path:
# this path is deprecated and should not be used anymore
file_path = self._create_temp_file(source)
project = self._create_single_file_project(file_path)
else:
project = self._create_single_file_project(file_path)
self.projects[file_path] = project
# "usual" case: a real file with a project directory is given
else:
if project_path in self.projects:
project = self.projects[project_path]
else:
project = self._create_project(project_path)
self.projects[project_path] = project
return project, file_path
def list_projects(self):
return self.projects.keys()
def _create_project(self, path):
project = Project(path, fscommands=None, ropefolder=None)
return project
def _create_single_file_project(self, path):
folder = os.path.dirname(path)
ignored_res = os.listdir(folder)
ignored_res.remove(os.path.basename(path))
project = Project(
folder, ropefolder=None,
ignored_resources=ignored_res, fscommands=None)
return project
def _create_temp_file(self, content):
"""
        Creates a temporary named file for use by Rope. Rope expects to
        be able to read files from disk in some places, so there is no
        easy way around creating these files. We try to delete those
        files in the server's destructor (see __del__).
"""
tmpfile = tempfile.NamedTemporaryFile(delete=False)
tmpfile.write(content.encode("utf-8"))
tf_path = tmpfile.name
self.tempfiles.append(tf_path)
tmpfile.close()
return tf_path
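# Minimal usage sketch (illustrative only, never called): how a scratch buffer
# and a regular project file resolve to Rope projects. The buffer id and the
# paths below are made-up example values.
def _example_project_lookup():
    mixin = RopeProjectMixin()
    # scratch buffer: a temp file plus a single-file project are created and cached
    proj, tmp_path = mixin.project_for(NO_ROOT_PATH, "BUFFER:view-42", source="import os\n")
    # real file inside a project directory: the project is created once and reused
    proj2, path2 = mixin.project_for("/path/to/project", "/path/to/project/module.py")
    return proj, proj2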
class RopeFunctionsMixin(object):
"""Uses Rope to generate completion proposals, depends on RopeProjectMixin
"""
def profile_completions(self, source, project_path, file_path, loc):
"""
Only for testing purposes::
runs Rope's code completion functionality in the python profiler
and saves statistics, then reruns for actual results
"""
try:
import cProfile as profile
except:
import profile
profile.runctx(
"self.completions(source, project_path, file_path, loc)",
globals(), locals(), os.path.expanduser("~/SublimePython.stats"))
return self.completions(source, project_path, file_path, loc)
def completions(self, source, project_path, file_path, loc):
"""
        Get completions from the underlying Rope library and return them
        to the editor interface
:param source: the document source
:param project_path: the actual project_path
:param file_path: the actual file path
:param loc: the buffer location
:returns: a list of tuples of strings
"""
project, resource = self._get_resource(project_path, file_path, source)
try:
proposals = code_assist(
project, source, loc, resource=resource, maxfixes=3)
proposals = sorted_proposals(proposals)
except ModuleSyntaxError:
proposals = []
except Exception:
import traceback
traceback.print_exc()
proposals = []
finally:
proposals = [
(self._proposal_string(p), self._insert_string(p))
for p in proposals if p.name != 'self='
]
return proposals
def documentation(self, source, project_path, file_path, loc):
"""
Search for documentation about the word in the current location
:param source: the document source
:param project_path: the actual project_path
:param file_path: the actual file path
:param loc: the buffer location
:returns: a string containing the documentation
"""
project, resource = self._get_resource(project_path, file_path, source)
try:
doc = get_doc(project, source, loc, resource=resource, maxfixes=3)
except ModuleSyntaxError:
doc = None
return doc
def definition_location(self, source, project_path, file_path, loc):
"""
        Get a global definition location and return it to the editor
:param source: the document source
:param project_path: the actual project_path
:param file_path: the actual file path
:param loc: the buffer location
:returns: a tuple containing the path and the line number
"""
project, resource = self._get_resource(project_path, file_path, source)
real_path, def_lineno = (None, None)
try:
def_resource, def_lineno = get_definition_location(
project, source, loc, resource=resource, maxfixes=3)
if def_resource:
real_path = def_resource.real_path
except ModuleSyntaxError:
pass
return real_path, def_lineno
def report_changed(self, project_path, file_path):
"""
Reports the change of the contents of file_path.
:param project_path: the actual project path
:param file_path: the file path
"""
if project_path != NO_ROOT_PATH:
project, file_path = self.project_for(project_path, file_path)
libutils.report_change(project, file_path, "")
def _proposal_string(self, p):
"""
        Build and return the display string for a completion proposal
:param p: the original proposal structure
"""
if p.parameters:
params = [par for par in p.parameters if par != 'self']
result = '{name}({params})'.format(
name=p.name,
params=', '.join(param for param in params)
)
else:
result = p.name
return '{result}\t({scope}, {type})'.format(
result=result, scope=p.scope, type=p.type)
def _insert_string(self, p):
"""
"""
if p.parameters:
params = [par for par in p.parameters if par != 'self']
param_snippet = ", ".join(
"${%i:%s}" %
(idx + 1, param) for idx, param in enumerate(params))
result = "%s(%s)" % (p.name, param_snippet)
else:
result = p.name
return result
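    # Illustrative outputs (the proposal name and parameters are made up): for a
    # proposal "join" with parameters ['self', 'iterable'], _proposal_string()
    # yields "join(iterable)\t(<scope>, <type>)" and _insert_string() yields the
    # Sublime snippet "join(${1:iterable})".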
def _get_resource(self, project_path, file_path, source):
"""Get and returns project and resource objects from Rope library
"""
project, file_path = self.project_for(project_path, file_path, source)
return project, libutils.path_to_resource(project, file_path)
class HeartBeatMixin(object):
"""
Waits for heartbeat messages from SublimeText. The main thread
kills the process if no heartbeat arrived in HEARTBEAT_TIMEOUT seconds.
"""
def __init__(self):
self.heartbeat()
def heartbeat(self):
global last_heartbeat
last_heartbeat = time.time()
logging.debug('bumbum %f', last_heartbeat)
class LinterMixin(object):
"""
Performs a PyFlakes and PEP8 check on the input code, returns either a
list of messages or a single syntax error in case of an error while
parsing the code. The receiver thus has to check for these two
cases.
"""
def check_syntax(self, code, encoding, lint_settings, filename):
'''The linting mixin does not use the project_for machinery,
        but uses the linters directly.'''
try:
codes = do_linting(lint_settings, code, encoding, filename)
        except Exception:
            import traceback
            sys.stderr.write(traceback.format_exc())
            codes = []  # fall back to an empty result so the pickling below cannot fail
import pickle
ret = Binary(pickle.dumps(codes))
return ret
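# The lint results travel back over XML-RPC as a Binary payload; a client would
# recover the original list of problems with pickle.loads(binary.data).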
class Server(RopeProjectMixin, HeartBeatMixin,
RopeFunctionsMixin, LinterMixin):
"""
Python's SimpleXMLRPCServer accepts just one call of
register_instance(), so this class just combines the above
mixins.
"""
def __init__(self):
RopeProjectMixin.__init__(self)
RopeFunctionsMixin.__init__(self)
HeartBeatMixin.__init__(self)
LinterMixin.__init__(self)
class DebuggingServer(Server):
"""
Prints calls and exceptions to stderr
"""
def __init__(self):
Server.__init__(self)
def _dispatch(self, method, params):
try:
sys.stderr.write("SublimePythonIDE Server is called: %s\n" % str(method))
method = getattr(self, method)
return method(*params)
except Exception as e:
sys.stderr.write("SublimePythonIDE Server Error: %s\n" % str(e))
import traceback
traceback.print_exc()
class XMLRPCServerThread(threading.Thread):
"""
Runs a SimpleXMLRPCServer in a new thread, so that the main
thread can watch for the heartbeats and kill the process if no
heartbeat messages arrive in time
:param port: the port where to listen to
:type port: int
"""
def __init__(self, port, debug):
threading.Thread.__init__(self)
self.port = port
self.daemon = True
self.debug = debug
def run(self):
self.server = SimpleXMLRPCServer(
("localhost", port), allow_none=True, logRequests=False)
# enable debugging?
if self.debug:
sys.stderr.write("SublimePythonIDE Server is starting in Debug mode\n")
self.server.register_instance(DebuggingServer())
else:
self.server.register_instance(Server())
self.server.serve_forever()
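# Illustrative client-side sketch (not executed): SublimeText talks to this
# server over XML-RPC. The port and the buffer id below are assumptions chosen
# for the example.
def _example_client_roundtrip(port=43000):
    if sys.version_info[0] == 2:
        from xmlrpclib import ServerProxy
    else:
        from xmlrpc.client import ServerProxy
    proxy = ServerProxy("http://localhost:%d" % port, allow_none=True)
    proxy.heartbeat()  # keeps the server process alive
    return proxy.completions("import o", NO_ROOT_PATH, "BUFFER:demo", 8)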
if __name__ == '__main__':
try:
# single argument to this process should be the port to listen on
port = int(sys.argv[1])
# second argument may be "--debug" in which case the server prints to stderr
debug = False
if len(sys.argv) > 2 and sys.argv[2].strip() == "--debug":
debug = True
# the SimpleXMLRPCServer is run in a new thread
server_thread = XMLRPCServerThread(port, debug)
server_thread.start()
# the main thread checks for heartbeat messages
while 1:
time.sleep(HEARTBEAT_TIMEOUT)
if time.time() - last_heartbeat > HEARTBEAT_TIMEOUT:
sys.exit()
except Exception as e:
sys.stderr.write("SublimePythonIDE Server Error: %s\n" % str(e))
import traceback
traceback.print_exc()
|
mit
| 5,703,854,125,502,567,000
| 32.191436
| 88
| 0.600364
| false
| 4.265782
| false
| false
| false
|
google-research/google-research
|
bigg/bigg/model/tree_model.py
|
1
|
22863
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: skip-file
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import defaultdict
from torch.nn.parameter import Parameter
from bigg.common.pytorch_util import glorot_uniform, MLP, BinaryTreeLSTMCell
from tqdm import tqdm
from bigg.model.util import AdjNode, ColAutomata, AdjRow
from bigg.model.tree_clib.tree_lib import TreeLib
from bigg.torch_ops import multi_index_select, PosEncoding
def hc_multi_select(ids_from, ids_to, h_froms, c_froms):
h_vecs = multi_index_select(ids_from,
ids_to,
*h_froms)
c_vecs = multi_index_select(ids_from,
ids_to,
*c_froms)
return h_vecs, c_vecs
def tree_state_select(h_bot, c_bot, h_buf, c_buf, fn_all_ids):
bot_froms, bot_tos, prev_froms, prev_tos = fn_all_ids()
if h_buf is None or prev_tos is None:
h_vecs = multi_index_select([bot_froms], [bot_tos], h_bot)
c_vecs = multi_index_select([bot_froms], [bot_tos], c_bot)
elif h_bot is None or bot_tos is None:
h_vecs = multi_index_select([prev_froms], [prev_tos], h_buf)
c_vecs = multi_index_select([prev_froms], [prev_tos], c_buf)
else:
h_vecs, c_vecs = hc_multi_select([bot_froms, prev_froms],
[bot_tos, prev_tos],
[h_bot, h_buf], [c_bot, c_buf])
return h_vecs, c_vecs
def batch_tree_lstm2(h_bot, c_bot, h_buf, c_buf, fn_all_ids, cell):
h_list = []
c_list = []
for i in range(2):
h_vecs, c_vecs = tree_state_select(h_bot, c_bot, h_buf, c_buf, lambda : fn_all_ids(i))
h_list.append(h_vecs)
c_list.append(c_vecs)
return cell((h_list[0], c_list[0]), (h_list[1], c_list[1]))
def batch_tree_lstm3(h_bot, c_bot, h_buf, c_buf, h_past, c_past, fn_all_ids, cell):
if h_past is None:
return batch_tree_lstm2(h_bot, c_bot, h_buf, c_buf, lambda i: fn_all_ids(i)[:-2], cell)
elif h_bot is None:
return batch_tree_lstm2(h_buf, c_buf, h_past, c_past, lambda i: fn_all_ids(i)[2:], cell)
    elif h_buf is None:
        # keep only the bottom and past id groups (positions 0, 1, 4 and 5 of the 6-tuple)
        return batch_tree_lstm2(h_bot, c_bot, h_past, c_past, lambda i: [fn_all_ids(i)[j] for j in (0, 1, 4, 5)], cell)
else:
h_list = []
c_list = []
for i in range(2):
bot_froms, bot_tos, prev_froms, prev_tos, past_froms, past_tos = fn_all_ids(i)
h_vecs, c_vecs = hc_multi_select([bot_froms, prev_froms, past_froms],
[bot_tos, prev_tos, past_tos],
[h_bot, h_buf, h_past],
[c_bot, c_buf, c_past])
h_list.append(h_vecs)
c_list.append(c_vecs)
return cell((h_list[0], c_list[0]), (h_list[1], c_list[1]))
class FenwickTree(nn.Module):
def __init__(self, args):
super(FenwickTree, self).__init__()
self.init_h0 = Parameter(torch.Tensor(1, args.embed_dim))
self.init_c0 = Parameter(torch.Tensor(1, args.embed_dim))
glorot_uniform(self)
self.merge_cell = BinaryTreeLSTMCell(args.embed_dim)
self.summary_cell = BinaryTreeLSTMCell(args.embed_dim)
if args.pos_enc:
self.pos_enc = PosEncoding(args.embed_dim, args.device, args.pos_base)
else:
self.pos_enc = lambda x: 0
def reset(self, list_states=[]):
self.list_states = []
for l in list_states:
t = []
for e in l:
t.append(e)
self.list_states.append(t)
def append_state(self, state, level):
if level >= len(self.list_states):
num_aug = level - len(self.list_states) + 1
for i in range(num_aug):
self.list_states.append([])
self.list_states[level].append(state)
def forward(self, new_state=None):
if new_state is None:
if len(self.list_states) == 0:
return (self.init_h0, self.init_c0)
else:
self.append_state(new_state, 0)
pos = 0
while pos < len(self.list_states):
if len(self.list_states[pos]) >= 2:
lch_state, rch_state = self.list_states[pos] # assert the length is 2
new_state = self.merge_cell(lch_state, rch_state)
self.list_states[pos] = []
self.append_state(new_state, pos + 1)
pos += 1
state = None
for pos in range(len(self.list_states)):
if len(self.list_states[pos]) == 0:
continue
cur_state = self.list_states[pos][0]
if state is None:
state = cur_state
else:
state = self.summary_cell(state, cur_state)
return state
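    # Illustrative trace (not executed): appending row states s1..s4 behaves like
    # incrementing a binary counter over the levels:
    #   after s1: level0=[s1]
    #   after s2: level0=[], level1=[merge(s1, s2)]
    #   after s4: level0=[], level1=[], level2=[merge(merge(s1, s2), merge(s3, s4))]
    # so only O(log n) partial states are kept; forward() then folds the
    # non-empty levels with summary_cell to summarize all rows seen so far.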
    def forward_train(self, h_bot, c_bot, h_buf0, c_buf0, prev_rowsum_h, prev_rowsum_c):
# embed row tree
tree_agg_ids = TreeLib.PrepareRowEmbed()
row_embeds = [(self.init_h0, self.init_c0)]
if h_bot is not None:
row_embeds.append((h_bot, c_bot))
if prev_rowsum_h is not None:
            row_embeds.append((prev_rowsum_h, prev_rowsum_c))
if h_buf0 is not None:
row_embeds.append((h_buf0, c_buf0))
th_bot = h_bot
tc_bot = c_bot
for i, all_ids in enumerate(tree_agg_ids):
fn_ids = lambda x: all_ids[x]
if i:
th_bot = tc_bot = None
new_states = batch_tree_lstm3(th_bot, tc_bot,
row_embeds[-1][0], row_embeds[-1][1],
                                          prev_rowsum_h, prev_rowsum_c,
fn_ids, self.merge_cell)
row_embeds.append(new_states)
h_list, c_list = zip(*row_embeds)
joint_h = torch.cat(h_list, dim=0)
joint_c = torch.cat(c_list, dim=0)
# get history representation
init_select, all_ids, last_tos, next_ids, pos_info = TreeLib.PrepareRowSummary()
cur_state = (joint_h[init_select], joint_c[init_select])
ret_state = (joint_h[next_ids], joint_c[next_ids])
hist_rnn_states = []
hist_froms = []
hist_tos = []
for i, (done_from, done_to, proceed_from, proceed_input) in enumerate(all_ids):
hist_froms.append(done_from)
hist_tos.append(done_to)
hist_rnn_states.append(cur_state)
next_input = joint_h[proceed_input], joint_c[proceed_input]
sub_state = cur_state[0][proceed_from], cur_state[1][proceed_from]
cur_state = self.summary_cell(sub_state, next_input)
hist_rnn_states.append(cur_state)
hist_froms.append(None)
hist_tos.append(last_tos)
hist_h_list, hist_c_list = zip(*hist_rnn_states)
pos_embed = self.pos_enc(pos_info)
row_h = multi_index_select(hist_froms, hist_tos, *hist_h_list) + pos_embed
row_c = multi_index_select(hist_froms, hist_tos, *hist_c_list) + pos_embed
return (row_h, row_c), ret_state
class BitsRepNet(nn.Module):
def __init__(self, args):
super(BitsRepNet, self).__init__()
self.bits_compress = args.bits_compress
self.out_dim = args.embed_dim
assert self.out_dim >= self.bits_compress
self.device = args.device
def forward(self, on_bits, n_cols):
h = torch.zeros(1, self.out_dim).to(self.device)
h[0, :n_cols] = -1.0
h[0, on_bits] = 1.0
return h, h
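# Small worked example for BitsRepNet.forward (illustrative): with out_dim=8,
# on_bits=[0, 2] and n_cols=4, the first n_cols entries start at -1 and the
# on_bits positions are flipped to 1, giving [1, -1, 1, -1, 0, 0, 0, 0].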
class RecurTreeGen(nn.Module):
def __init__(self, args):
super(RecurTreeGen, self).__init__()
self.directed = args.directed
self.self_loop = args.self_loop
self.bits_compress = args.bits_compress
self.greedy_frac = args.greedy_frac
self.share_param = args.share_param
if not self.bits_compress:
self.leaf_h0 = Parameter(torch.Tensor(1, args.embed_dim))
self.leaf_c0 = Parameter(torch.Tensor(1, args.embed_dim))
self.empty_h0 = Parameter(torch.Tensor(1, args.embed_dim))
self.empty_c0 = Parameter(torch.Tensor(1, args.embed_dim))
self.topdown_left_embed = Parameter(torch.Tensor(2, args.embed_dim))
self.topdown_right_embed = Parameter(torch.Tensor(2, args.embed_dim))
glorot_uniform(self)
if self.bits_compress > 0:
self.bit_rep_net = BitsRepNet(args)
if self.share_param:
self.m_l2r_cell = BinaryTreeLSTMCell(args.embed_dim)
self.lr2p_cell = BinaryTreeLSTMCell(args.embed_dim)
self.pred_has_ch = MLP(args.embed_dim, [2 * args.embed_dim, 1])
self.m_pred_has_left = MLP(args.embed_dim, [2 * args.embed_dim, 1])
self.m_pred_has_right = MLP(args.embed_dim, [2 * args.embed_dim, 1])
self.m_cell_topdown = nn.LSTMCell(args.embed_dim, args.embed_dim)
self.m_cell_topright = nn.LSTMCell(args.embed_dim, args.embed_dim)
else:
fn_pred = lambda: MLP(args.embed_dim, [2 * args.embed_dim, 1])
fn_tree_cell = lambda: BinaryTreeLSTMCell(args.embed_dim)
fn_lstm_cell = lambda: nn.LSTMCell(args.embed_dim, args.embed_dim)
num_params = int(np.ceil(np.log2(args.max_num_nodes))) + 1
self.pred_has_ch = fn_pred()
pred_modules = [[] for _ in range(2)]
tree_cell_modules = []
lstm_cell_modules = [[] for _ in range(2)]
for _ in range(num_params):
for i in range(2):
pred_modules[i].append(fn_pred())
lstm_cell_modules[i].append(fn_lstm_cell())
tree_cell_modules.append(fn_tree_cell())
self.has_left_modules, self.has_right_modules = [nn.ModuleList(l) for l in pred_modules]
self.l2r_modules= nn.ModuleList(tree_cell_modules)
self.cell_topdown_modules, self.cell_topright_modules = [nn.ModuleList(l) for l in lstm_cell_modules]
self.lr2p_cell = fn_tree_cell()
self.row_tree = FenwickTree(args)
if args.tree_pos_enc:
self.tree_pos_enc = PosEncoding(args.embed_dim, args.device, args.pos_base, bias=np.pi / 4)
else:
self.tree_pos_enc = lambda x: 0
def cell_topdown(self, x, y, lv):
cell = self.m_cell_topdown if self.share_param else self.cell_topdown_modules[lv]
return cell(x, y)
def cell_topright(self, x, y, lv):
cell = self.m_cell_topright if self.share_param else self.cell_topright_modules[lv]
return cell(x, y)
def l2r_cell(self, x, y, lv):
cell = self.m_l2r_cell if self.share_param else self.l2r_modules[lv]
return cell(x, y)
def pred_has_left(self, x, lv):
mlp = self.m_pred_has_left if self.share_param else self.has_left_modules[lv]
return mlp(x)
def pred_has_right(self, x, lv):
mlp = self.m_pred_has_right if self.share_param else self.has_right_modules[lv]
return mlp(x)
def get_empty_state(self):
if self.bits_compress:
return self.bit_rep_net([], 1)
else:
return (self.empty_h0, self.empty_c0)
def get_prob_fix(self, prob):
p = prob * (1 - self.greedy_frac)
if prob >= 0.5:
p += self.greedy_frac
return p
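    # e.g. with greedy_frac=0.9: prob=0.7 -> 0.7 * 0.1 + 0.9 = 0.97 and
    # prob=0.3 -> 0.3 * 0.1 = 0.03, pushing sampling towards the greedy choice.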
def gen_row(self, ll, state, tree_node, col_sm, lb, ub):
assert lb <= ub
if tree_node.is_root:
prob_has_edge = torch.sigmoid(self.pred_has_ch(state[0]))
if col_sm.supervised:
has_edge = len(col_sm.indices) > 0
else:
has_edge = np.random.rand() < self.get_prob_fix(prob_has_edge.item())
if ub == 0:
has_edge = False
if tree_node.n_cols <= 0:
has_edge = False
if lb:
has_edge = True
if has_edge:
ll = ll + torch.log(prob_has_edge)
else:
ll = ll + torch.log(1 - prob_has_edge)
tree_node.has_edge = has_edge
else:
assert ub > 0
tree_node.has_edge = True
if not tree_node.has_edge: # an empty tree
return ll, self.get_empty_state(), 0
if tree_node.is_leaf:
tree_node.bits_rep = [0]
col_sm.add_edge(tree_node.col_range[0])
if self.bits_compress:
return ll, self.bit_rep_net(tree_node.bits_rep, tree_node.n_cols), 1
else:
return ll, (self.leaf_h0, self.leaf_c0), 1
else:
tree_node.split()
mid = (tree_node.col_range[0] + tree_node.col_range[1]) // 2
left_prob = torch.sigmoid(self.pred_has_left(state[0], tree_node.depth))
if col_sm.supervised:
has_left = col_sm.next_edge < mid
else:
has_left = np.random.rand() < self.get_prob_fix(left_prob.item())
if ub == 0:
has_left = False
if lb > tree_node.rch.n_cols:
has_left = True
ll = ll + (torch.log(left_prob) if has_left else torch.log(1 - left_prob))
left_pos = self.tree_pos_enc([tree_node.lch.n_cols])
state = self.cell_topdown(self.topdown_left_embed[[int(has_left)]] + left_pos, state, tree_node.depth)
if has_left:
lub = min(tree_node.lch.n_cols, ub)
llb = max(0, lb - tree_node.rch.n_cols)
ll, left_state, num_left = self.gen_row(ll, state, tree_node.lch, col_sm, llb, lub)
else:
left_state = self.get_empty_state()
num_left = 0
right_pos = self.tree_pos_enc([tree_node.rch.n_cols])
topdown_state = self.l2r_cell(state, (left_state[0] + right_pos, left_state[1] + right_pos), tree_node.depth)
rlb = max(0, lb - num_left)
rub = min(tree_node.rch.n_cols, ub - num_left)
if not has_left:
has_right = True
else:
right_prob = torch.sigmoid(self.pred_has_right(topdown_state[0], tree_node.depth))
if col_sm.supervised:
has_right = col_sm.has_edge(mid, tree_node.col_range[1])
else:
has_right = np.random.rand() < self.get_prob_fix(right_prob.item())
if rub == 0:
has_right = False
if rlb:
has_right = True
ll = ll + (torch.log(right_prob) if has_right else torch.log(1 - right_prob))
topdown_state = self.cell_topright(self.topdown_right_embed[[int(has_right)]], topdown_state, tree_node.depth)
if has_right: # has edge in right child
ll, right_state, num_right = self.gen_row(ll, topdown_state, tree_node.rch, col_sm, rlb, rub)
else:
right_state = self.get_empty_state()
num_right = 0
if tree_node.col_range[1] - tree_node.col_range[0] <= self.bits_compress:
summary_state = self.bit_rep_net(tree_node.bits_rep, tree_node.n_cols)
else:
summary_state = self.lr2p_cell(left_state, right_state)
return ll, summary_state, num_left + num_right
def forward(self, node_end, edge_list=None, node_start=0, list_states=[], lb_list=None, ub_list=None, col_range=None, num_nodes=None, display=False):
pos = 0
total_ll = 0.0
edges = []
self.row_tree.reset(list_states)
controller_state = self.row_tree()
if num_nodes is None:
num_nodes = node_end
pbar = range(node_start, node_end)
if display:
pbar = tqdm(pbar)
for i in pbar:
if edge_list is None:
col_sm = ColAutomata(supervised=False)
else:
indices = []
while pos < len(edge_list) and i == edge_list[pos][0]:
indices.append(edge_list[pos][1])
pos += 1
indices.sort()
col_sm = ColAutomata(supervised=True, indices=indices)
cur_row = AdjRow(i, self.directed, self.self_loop, col_range=col_range)
lb = 0 if lb_list is None else lb_list[i]
ub = cur_row.root.n_cols if ub_list is None else ub_list[i]
cur_pos_embed = self.row_tree.pos_enc([num_nodes - i])
controller_state = [x + cur_pos_embed for x in controller_state]
ll, cur_state, _ = self.gen_row(0, controller_state, cur_row.root, col_sm, lb, ub)
assert lb <= len(col_sm.indices) <= ub
controller_state = self.row_tree(cur_state)
edges += [(i, x) for x in col_sm.indices]
total_ll = total_ll + ll
return total_ll, edges, self.row_tree.list_states
def binary_ll(self, pred_logits, np_label, need_label=False, reduction='sum'):
pred_logits = pred_logits.view(-1, 1)
label = torch.tensor(np_label, dtype=torch.float32).to(pred_logits.device).view(-1, 1)
loss = F.binary_cross_entropy_with_logits(pred_logits, label, reduction=reduction)
if need_label:
return -loss, label
return -loss
def forward_row_trees(self, graph_ids, list_node_starts=None, num_nodes=-1, list_col_ranges=None):
TreeLib.PrepareMiniBatch(graph_ids, list_node_starts, num_nodes, list_col_ranges)
# embed trees
all_ids = TreeLib.PrepareTreeEmbed()
if not self.bits_compress:
h_bot = torch.cat([self.empty_h0, self.leaf_h0], dim=0)
c_bot = torch.cat([self.empty_c0, self.leaf_c0], dim=0)
fn_hc_bot = lambda d: (h_bot, c_bot)
else:
binary_embeds, base_feat = TreeLib.PrepareBinary()
fn_hc_bot = lambda d: (binary_embeds[d], binary_embeds[d]) if d < len(binary_embeds) else base_feat
max_level = len(all_ids) - 1
h_buf_list = [None] * (len(all_ids) + 1)
c_buf_list = [None] * (len(all_ids) + 1)
for d in range(len(all_ids) - 1, -1, -1):
fn_ids = lambda i: all_ids[d][i]
if d == max_level:
h_buf = c_buf = None
else:
h_buf = h_buf_list[d + 1]
c_buf = c_buf_list[d + 1]
h_bot, c_bot = fn_hc_bot(d + 1)
new_h, new_c = batch_tree_lstm2(h_bot, c_bot, h_buf, c_buf, fn_ids, self.lr2p_cell)
h_buf_list[d] = new_h
c_buf_list[d] = new_c
return fn_hc_bot, h_buf_list, c_buf_list
def forward_row_summaries(self, graph_ids, list_node_starts=None, num_nodes=-1, prev_rowsum_states=[None, None], list_col_ranges=None):
fn_hc_bot, h_buf_list, c_buf_list = self.forward_row_trees(graph_ids, list_node_starts, num_nodes, list_col_ranges)
row_states, next_states = self.row_tree.forward_train(*(fn_hc_bot(0)), h_buf_list[0], c_buf_list[0], *prev_rowsum_states)
return row_states, next_states
def forward_train(self, graph_ids, list_node_starts=None, num_nodes=-1, prev_rowsum_states=[None, None], list_col_ranges=None):
fn_hc_bot, h_buf_list, c_buf_list = self.forward_row_trees(graph_ids, list_node_starts, num_nodes, list_col_ranges)
row_states, next_states = self.row_tree.forward_train(*(fn_hc_bot(0)), h_buf_list[0], c_buf_list[0], *prev_rowsum_states)
# make prediction
logit_has_edge = self.pred_has_ch(row_states[0])
has_ch, _ = TreeLib.GetChLabel(0, dtype=np.bool)
ll = self.binary_ll(logit_has_edge, has_ch)
        # keep only the states of rows that have at least one child edge
cur_states = (row_states[0][has_ch], row_states[1][has_ch])
lv = 0
while True:
is_nonleaf = TreeLib.QueryNonLeaf(lv)
if is_nonleaf is None or np.sum(is_nonleaf) == 0:
break
cur_states = (cur_states[0][is_nonleaf], cur_states[1][is_nonleaf])
left_logits = self.pred_has_left(cur_states[0], lv)
has_left, num_left = TreeLib.GetChLabel(-1, lv)
left_update = self.topdown_left_embed[has_left] + self.tree_pos_enc(num_left)
left_ll, float_has_left = self.binary_ll(left_logits, has_left, need_label=True, reduction='sum')
ll = ll + left_ll
cur_states = self.cell_topdown(left_update, cur_states, lv)
left_ids = TreeLib.GetLeftRootStates(lv)
h_bot, c_bot = fn_hc_bot(lv + 1)
if lv + 1 < len(h_buf_list):
h_next_buf, c_next_buf = h_buf_list[lv + 1], c_buf_list[lv + 1]
else:
h_next_buf = c_next_buf = None
left_subtree_states = tree_state_select(h_bot, c_bot,
h_next_buf, c_next_buf,
lambda: left_ids)
has_right, num_right = TreeLib.GetChLabel(1, lv)
right_pos = self.tree_pos_enc(num_right)
left_subtree_states = [x + right_pos for x in left_subtree_states]
topdown_state = self.l2r_cell(cur_states, left_subtree_states, lv)
right_logits = self.pred_has_right(topdown_state[0], lv)
right_update = self.topdown_right_embed[has_right]
topdown_state = self.cell_topright(right_update, topdown_state, lv)
right_ll = self.binary_ll(right_logits, has_right, reduction='none') * float_has_left
ll = ll + torch.sum(right_ll)
lr_ids = TreeLib.GetLeftRightSelect(lv, np.sum(has_left), np.sum(has_right))
new_states = []
for i in range(2):
new_s = multi_index_select([lr_ids[0], lr_ids[2]], [lr_ids[1], lr_ids[3]],
cur_states[i], topdown_state[i])
new_states.append(new_s)
cur_states = tuple(new_states)
lv += 1
return ll, next_states
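# Illustrative sketch (never called): a minimal hyper-parameter object and one
# unsupervised sampling call. The field values are assumptions chosen to take the
# shared-parameter path; real configs come from bigg's argument parser.
def _example_sample_small_graph():
    from argparse import Namespace
    args = Namespace(embed_dim=16, device='cpu', pos_base=10000, pos_enc=False,
                     tree_pos_enc=False, bits_compress=0, directed=False,
                     self_loop=False, greedy_frac=0.0, share_param=True,
                     max_num_nodes=16)
    model = RecurTreeGen(args)
    # sample a 5-node graph; returns the log-likelihood, the edge list and row states
    ll, edges, states = model(node_end=5)
    return ll, edges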
|
apache-2.0
| 8,929,886,291,118,226,000
| 42.715105
| 153
| 0.55264
| false
| 3.206142
| false
| false
| false
|
theislab/scanpy
|
scanpy/tests/test_ingest.py
|
1
|
3770
|
import pytest
import numpy as np
from sklearn.neighbors import KDTree
from umap import UMAP
import scanpy as sc
from scanpy import settings
from scanpy._compat import pkg_version
X = np.array(
[
[1.0, 2.5, 3.0, 5.0, 8.7],
[4.2, 7.0, 9.0, 11.0, 7.0],
[5.1, 2.0, 9.0, 4.0, 9.0],
[7.0, 9.4, 6.8, 9.1, 8.0],
[8.9, 8.6, 9.6, 1.0, 2.0],
[6.5, 8.9, 2.2, 4.5, 8.9],
]
)
T = np.array([[2.0, 3.5, 4.0, 1.0, 4.7], [3.2, 2.0, 5.0, 5.0, 8.0]])
@pytest.fixture
def adatas():
pbmc = sc.datasets.pbmc68k_reduced()
n_split = 500
adata_ref = sc.AnnData(pbmc.X[:n_split, :], obs=pbmc.obs.iloc[:n_split])
adata_new = sc.AnnData(pbmc.X[n_split:, :])
sc.pp.pca(adata_ref)
sc.pp.neighbors(adata_ref)
sc.tl.umap(adata_ref)
return adata_ref, adata_new
def test_representation(adatas):
adata_ref = adatas[0].copy()
adata_new = adatas[1].copy()
ing = sc.tl.Ingest(adata_ref)
ing.fit(adata_new)
assert ing._use_rep == 'X_pca'
assert ing._obsm['rep'].shape == (adata_new.n_obs, settings.N_PCS)
assert ing._pca_centered
sc.pp.pca(adata_ref, n_comps=30, zero_center=False)
sc.pp.neighbors(adata_ref)
ing = sc.tl.Ingest(adata_ref)
ing.fit(adata_new)
assert ing._use_rep == 'X_pca'
assert ing._obsm['rep'].shape == (adata_new.n_obs, 30)
assert not ing._pca_centered
sc.pp.neighbors(adata_ref, use_rep='X')
ing = sc.tl.Ingest(adata_ref)
ing.fit(adata_new)
assert ing._use_rep == 'X'
assert ing._obsm['rep'] is adata_new.X
def test_neighbors(adatas):
adata_ref = adatas[0].copy()
adata_new = adatas[1].copy()
ing = sc.tl.Ingest(adata_ref)
ing.fit(adata_new)
ing.neighbors(k=10)
indices = ing._indices
tree = KDTree(adata_ref.obsm['X_pca'])
true_indices = tree.query(ing._obsm['rep'], 10, return_distance=False)
num_correct = 0.0
for i in range(adata_new.n_obs):
num_correct += np.sum(np.in1d(true_indices[i], indices[i]))
percent_correct = num_correct / (adata_new.n_obs * 10)
assert percent_correct > 0.99
@pytest.mark.parametrize('n', [3, 4])
def test_neighbors_defaults(adatas, n):
adata_ref = adatas[0].copy()
adata_new = adatas[1].copy()
sc.pp.neighbors(adata_ref, n_neighbors=n)
ing = sc.tl.Ingest(adata_ref)
ing.fit(adata_new)
ing.neighbors()
assert ing._indices.shape[1] == n
@pytest.mark.skipif(
pkg_version("anndata") < sc.tl._ingest.ANNDATA_MIN_VERSION,
reason="`AnnData.concatenate` does not concatenate `.obsm` in old anndata versions",
)
def test_ingest_function(adatas):
adata_ref = adatas[0].copy()
adata_new = adatas[1].copy()
sc.tl.ingest(
adata_new,
adata_ref,
obs='bulk_labels',
embedding_method=['umap', 'pca'],
inplace=True,
)
assert 'bulk_labels' in adata_new.obs
assert 'X_umap' in adata_new.obsm
assert 'X_pca' in adata_new.obsm
ad = sc.tl.ingest(
adata_new,
adata_ref,
obs='bulk_labels',
embedding_method=['umap', 'pca'],
inplace=False,
)
assert 'bulk_labels' in ad.obs
assert 'X_umap' in ad.obsm
assert 'X_pca' in ad.obsm
def test_ingest_map_embedding_umap():
adata_ref = sc.AnnData(X)
adata_new = sc.AnnData(T)
sc.pp.neighbors(
adata_ref, method='umap', use_rep='X', n_neighbors=4, random_state=0
)
sc.tl.umap(adata_ref, random_state=0)
ing = sc.tl.Ingest(adata_ref)
ing.fit(adata_new)
ing.map_embedding(method='umap')
reducer = UMAP(min_dist=0.5, random_state=0, n_neighbors=4)
reducer.fit(X)
umap_transformed_t = reducer.transform(T)
assert np.allclose(ing._obsm['X_umap'], umap_transformed_t)
|
bsd-3-clause
| 1,639,938,025,560,681,500
| 23.640523
| 88
| 0.602918
| false
| 2.571623
| true
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_product_policy_operations.py
|
1
|
21264
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ProductPolicyOperations(object):
"""ProductPolicyOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.apimanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_product(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.PolicyCollection"
"""Get the policy configuration at the Product level.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyCollection, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.PolicyCollection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.list_by_product.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PolicyCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_product.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/policies'} # type: ignore
def get_entity_tag(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
policy_id, # type: Union[str, "_models.PolicyIdName"]
**kwargs # type: Any
):
# type: (...) -> bool
"""Get the ETag of the policy configuration at the Product level.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param policy_id: The identifier of the Policy.
:type policy_id: str or ~azure.mgmt.apimanagement.models.PolicyIdName
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get_entity_tag.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'policyId': self._serialize.url("policy_id", policy_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_tag.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/policies/{policyId}'} # type: ignore
def get(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
policy_id, # type: Union[str, "_models.PolicyIdName"]
format="xml", # type: Optional[Union[str, "_models.PolicyExportFormat"]]
**kwargs # type: Any
):
# type: (...) -> "_models.PolicyContract"
"""Get the policy configuration at the Product level.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param policy_id: The identifier of the Policy.
:type policy_id: str or ~azure.mgmt.apimanagement.models.PolicyIdName
:param format: Policy Export Format.
:type format: str or ~azure.mgmt.apimanagement.models.PolicyExportFormat
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.PolicyContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'policyId': self._serialize.url("policy_id", policy_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if format is not None:
query_parameters['format'] = self._serialize.query("format", format, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('PolicyContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/policies/{policyId}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
policy_id, # type: Union[str, "_models.PolicyIdName"]
parameters, # type: "_models.PolicyContract"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.PolicyContract"
"""Creates or updates policy configuration for the Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param policy_id: The identifier of the Policy.
:type policy_id: str or ~azure.mgmt.apimanagement.models.PolicyIdName
:param parameters: The policy contents to apply.
:type parameters: ~azure.mgmt.apimanagement.models.PolicyContract
:param if_match: ETag of the Entity. Not required when creating an entity, but required when
updating an entity.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PolicyContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.PolicyContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'policyId': self._serialize.url("policy_id", policy_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PolicyContract')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('PolicyContract', pipeline_response)
if response.status_code == 201:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('PolicyContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/policies/{policyId}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
service_name, # type: str
product_id, # type: str
policy_id, # type: Union[str, "_models.PolicyIdName"]
if_match, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes the policy configuration at the Product.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param product_id: Product identifier. Must be unique in the current API Management service
instance.
:type product_id: str
:param policy_id: The identifier of the Policy.
:type policy_id: str or ~azure.mgmt.apimanagement.models.PolicyIdName
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'productId': self._serialize.url("product_id", product_id, 'str', max_length=256, min_length=1),
'policyId': self._serialize.url("policy_id", policy_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/products/{productId}/policies/{policyId}'} # type: ignore
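    # Illustrative call sketch (not part of the generated client; names are
    # assumptions). With an authenticated ApiManagementClient `client`, and
    # assuming this operations class is exposed as `client.product_policy`,
    # the two operations above would be used roughly like:
    #
    #     policy = client.product_policy.create_or_update(
    #         resource_group_name="rg", service_name="apim",
    #         product_id="prod-1", policy_id="policy",
    #         parameters=policy_contract)
    #     client.product_policy.delete(
    #         resource_group_name="rg", service_name="apim",
    #         product_id="prod-1", policy_id="policy", if_match="*")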
|
mit
| 2,958,798,404,665,335,000
| 49.992806
| 222
| 0.644987
| false
| 4.087659
| true
| false
| false
|
mobarski/sandbox
|
rsm/v10/common2.py
|
1
|
1795
|
from __future__ import print_function
import numpy as np
from random import shuffle, random,seed
from time import time
from heapq import nlargest
from collections import deque,Counter
from itertools import cycle
import marshal
from pprint import pprint
import sys
def combinations(n,k):
"return k from n combination"
out = list(range(n))
shuffle(out)
return out[:k]
def random_vector(n,lo=0,hi=1):
"return 1d uniform random vector"
return np.random.randint(lo,hi+1,n)
def random_sparse_vector(n,lo=0,hi=1,d=0.1,k=None):
"return 1d random vector with some of its values set to zero"
sparse = np.zeros(n)
k = k or int(d*n)
positions = combinations(n,k)
sparse[list(positions)] = random_vector(k,lo+1,hi)
return sparse
def top(k,d,items=False,values=False):
"return k elements with largest values from dictionary"
if items:
return nlargest(k,((x,d[x]) for x in d),key=lambda x:x[1])
elif values:
return nlargest(k,d.values())
else:
return nlargest(k,d,key=lambda x:d[x])
def clock(label,t0,t1=None):
"print execution time"
	dt = time()-t0 if t1 is None else t1-t0
print("{:.3f}\t{}".format(dt,label))
def avg(v):
"average"
return 1.0*sum(v)/len(v)
def gini(data):
"gini index"
g = 0
for a in data:
for b in data:
g += abs(a-b)
return float(g)/(2.0*len(data)*sum(data))
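# Worked example for gini() (comment added for illustration, not in the
# original module): gini([1, 2, 3, 4]) sums |a - b| over all ordered pairs,
# which is 20, and divides by 2 * len(data) * sum(data) = 2 * 4 * 10 = 80,
# giving 0.25.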
def pick(v_set,n):
"select n random values from a set"
if n<=0: return []
out = list(v_set)
shuffle(out)
return out[:n]
if __name__=="__main__":
x = random_vector(30,0,1)
print(x)
y = random_vector(30,0,1)
print(y)
print(x+y)
print(combinations(10,5))
d = dict(enumerate(x+y))
print(top(3,d,values=True))
print(top(2,dict(a=1,b=2,c=3),values=True))
print(top(2,dict(a=1,b=2,c=3),values=False))
print(random_sparse_vector(20,d=0.2))
print(random_sparse_vector(20,k=10))
|
mit
| -8,747,349,751,396,877,000
| 22.618421
| 62
| 0.687465
| false
| 2.442177
| false
| false
| false
|
CroMarmot/MyOICode
|
ProjectEuler/p233.py
|
1
|
5380
|
p = dict()
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
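# Reading of main() below (comments added for illustration). This appears to
# be an exploratory pass at Project Euler problem 233 (circles through the
# corners of an N x N square with exactly 420 lattice points):
# - the (m, n) loop tabulates, in the dict p, a count of 2 for each value
#   n*n + m*m (halved when even) produced by coprime pairs with n < m;
# - the second loop then, for each odd i up to 2*10**7, sums those counts over
#   the divisor pairs (j, i//j) of i and prints i whenever 4 * s == 420.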
def main():
# 420 / 4 = 105
p[1] = 1
m = 2
maxv = 2*10**7
while m * m < maxv:
n = m - 1
while n > 0 and n*n-m*m+2*n*m > 0:
if gcd(n, m) != 1:
n -= 1
continue
n2m2 = n*n + m*m
if n2m2 % 2 == 0:
n2m2 //= 2
# print((n*n+2*m*n-m*m)//2, (m*m+2*m*n-n*n)//2, n2m2)
else:
# print(n*n+2*m*n-m*m, m*m+2*m*n-n*n, n2m2)
pass
if n2m2 <= maxv:
if n2m2 in p:
p[n2m2] += 2
else:
p[n2m2] = 2
# print(n2m2)
n -= 1
m += 1
# print('m', m)
print("finish p")
for i in range(1, maxv):
if i % 2 == 0:
continue
s = 0
for j in range(1, 10**4):
if j * j > i:
break
if i % j == 0 and j <= i//j:
if j in p:
s += p[j]
if i//j > j and i//j in p:
s += p[i//j]
if 4 * s == 420:
print(i, 4 * s)
main()
print("end")
"""
359125 420
469625 420
612625 420
781625 420
866125 420
933725 420
1047625 420
1077375 420
1119625 420
1288625 420
1336625 420
1366625 420
1408875 420
1481125 420
1542125 420
1592825 420
1596725 420
1787125 420
1837875 420
1880125 420
1914625 420
2032225 420
2049125 420
2133625 420
2203625 420
2224625 420
2251925 420
2302625 420
2344875 420
2387125 420
2513875 420
2598375 420
2637125 420
2731625 420
2801175 420
2894125 420
2909125 420
2911025 420
3142875 420
3147625 420
3174665 420
3215125 420
3232125 420
3287375 420
3316625 420
3350425 420
3358875 420
3504125 420
3561925 420
3572125 420
3648625 420
3654625 420
3823625 420
3865875 420
3889625 420
3937625 420
3950375 420
4009525 420
4009875 420
4077125 420
4082125 420
4099875 420
4151485 420
4161625 420
4226625 420
4288375 420
4310125 420
4443375 420
4544525 420
4564625 420
4626375 420
4778475 420
4790175 420
4837625 420
4888325 420
4922125 420
4949125 420
4962625 420
5035825 420
5091125 420
5165875 420
5327725 420
5361375 420
5382625 420
5429125 420
5471375 420
5513625 420
5547425 420
5571625 420
5640375 420
5671625 420
5682625 420
5743875 420
5851625 420
5936125 420
5969125 420
5986825 420
6046625 420
6062875 420
6093625 420
6096675 420
6147375 420
6189625 420
6206525 420
6249625 420
6400875 420
6412625 420
6509725 420
6536075 420
6538625 420
6610875 420
6612125 420
6673875 420
6696625 420
6738875 420
6755775 420
6823375 420
6907875 420
6972125 420
7016125 420
7034625 420
7116625 420
7119125 420
7161375 420
7333375 420
7372625 420
7457125 420
7492325 420
7524725 420
7541625 420
7674125 420
7774625 420
7795125 420
7837375 420
7879625 420
7907125 420
7911375 420
7926425 420
8183825 420
8194875 420
8217625 420
8259875 420
8272625 420
8386625 420
8403525 420
8417125 420
8471125 420
8597875 420
8623225 420
8640125 420
8659625 420
8682375 420
8706125 420
8727375 420
8733075 420
8893625 420
8922875 420
8966225 420
9020375 420
9069625 420
9147125 420
9238385 420
9284125 420
9356125 420
9356375 420
9428625 420
9442875 420
9485125 420
9502025 420
9523995 420
9527375 420
9566375 420
9645375 420
9654125 420
9696375 420
9717625 420
9738625 420
9862125 420
9941425 420
9949875 420
10006625 420
10051275 420
10076625 420
10151125 420
10182625 420
10197125 420
10270975 420
10365325 420
10367875 420
10438625 420
10512375 420
10584625 420
10600525 420
10617625 420
10685775 420
10716375 420
10752625 420
10794875 420
10801375 420
10820225 420
10931425 420
10945875 420
10963875 420
11006125 420
11132875 420
11136625 420
11149775 420
11177075 420
11307125 420
11324125 420
11428625 420
11451625 420
11458625 420
11470875 420
11523875 420
11597625 420
11639875 420
11668875 420
11766625 420
11812875 420
11851125 420
11879125 420
11914025 420
12020125 420
12028575 420
12029625 420
12189125 420
12231375 420
12246375 420
12299625 420
12315875 420
12405325 420
12454455 420
12484875 420
12492125 420
12509875 420
12527125 420
12607625 420
12679875 420
12752125 420
12797525 420
12817625 420
12865125 420
12930375 420
12949625 420
12991625 420
13160875 420
13236925 420
13330125 420
13402375 420
13633575 420
13693875 420
13879125 420
13879225 420
14090375 420
14174875 420
14225575 420
14335425 420
14343875 420
14370525 420
14512875 420
14558375 420
14664975 420
14702875 420
14766375 420
14775125 420
14847375 420
14850875 420
14887875 420
14935375 420
15032875 420
15107475 420
15208625 420
15230125 420
15273375 420
15425375 420
15433925 420
15442375 420
15497625 420
15572375 420
15763475 420
15983175 420
16084125 420
16118375 420
16147875 420
16287375 420
16292375 420
16414125 420
16456375 420
16540875 420
16642275 420
16709875 420
16714875 420
16878875 420
16921125 420
16963375 420
17014875 420
17047875 420
17231625 420
17521075 420
17554875 420
17563975 420
17597125 420
17740775 420
17808375 420
17907375 420
17960475 420
17977375 420
18139875 420
18186625 420
18188625 420
18280875 420
18290025 420
18300925 420
18442125 420
18459875 420
18568875 420
18619575 420
18748875 420
18991375 420
19121375 420
19202625 420
19237875 420
19529175 420
19608225 420
19615875 420
19658375 420
19832625 420
19836375 420
19904875 420
19920875 420
"""
|
gpl-3.0
| 8,071,894,136,991,945,000
| 12.830334
| 69
| 0.712268
| false
| 2.183442
| false
| false
| false
|
linjinjin123/leetcode
|
剑指offer/平衡二叉树.py
|
1
|
1099
|
# Problem description:
# Given a binary tree, determine whether it is a balanced binary tree.
# -*- coding:utf-8 -*-
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def IsBalanced_Solution(self, pRoot):
# write code here
if pRoot == None: return True
return abs(self.get_depth(pRoot.left) - self.get_depth(pRoot.right)) <= 1 \
and self.IsBalanced_Solution(pRoot.left) \
and self.IsBalanced_Solution(pRoot.right)
def get_depth(self, root):
if root == None: return 0
stack = []
depth = 1
max_depth = 1
while root != None or len(stack) > 0:
if root != None:
stack.append([root, depth])
root = root.left
depth += 1
else:
root, depth = stack.pop()
if depth > max_depth:
max_depth = depth
root = root.right
depth += 1
return max_depth
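# Illustrative self-check (not part of the original submission). The TreeNode
# class above is commented out, so a minimal stand-in is defined here.
if __name__ == "__main__":
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.left = TreeNode(4)
    print(Solution().IsBalanced_Solution(root))  # expected: True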
|
mit
| -3,442,280,454,682,868,000
| 28.885714
| 83
| 0.480383
| false
| 3.415033
| false
| false
| false
|
samj1912/picard
|
picard/ui/tagsfromfilenames.py
|
1
|
5318
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2006 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
import os.path
from PyQt5 import QtCore, QtWidgets
from picard import config
from picard.ui.util import StandardButton
from picard.ui import PicardDialog
from picard.ui.ui_tagsfromfilenames import Ui_TagsFromFileNamesDialog
from picard.util.tags import display_tag_name
class TagsFromFileNamesDialog(PicardDialog):
defaultsize = QtCore.QSize(560, 400)
options = [
config.TextOption("persist", "tags_from_filenames_format", ""),
]
def __init__(self, files, parent=None):
super().__init__(parent)
self.ui = Ui_TagsFromFileNamesDialog()
self.ui.setupUi(self)
items = [
"%artist%/%album%/%title%",
"%artist%/%album%/%tracknumber% %title%",
"%artist%/%album%/%tracknumber% - %title%",
"%artist%/%album% - %tracknumber% - %title%",
"%artist% - %album%/%title%",
"%artist% - %album%/%tracknumber% %title%",
"%artist% - %album%/%tracknumber% - %title%",
]
tff_format = config.persist["tags_from_filenames_format"]
if tff_format not in items:
selected_index = 0
if tff_format:
items.insert(0, tff_format)
else:
selected_index = items.index(tff_format)
self.ui.format.addItems(items)
self.ui.format.setCurrentIndex(selected_index)
self.ui.buttonbox.addButton(StandardButton(StandardButton.OK), QtWidgets.QDialogButtonBox.AcceptRole)
self.ui.buttonbox.addButton(StandardButton(StandardButton.CANCEL), QtWidgets.QDialogButtonBox.RejectRole)
self.ui.buttonbox.accepted.connect(self.accept)
self.ui.buttonbox.rejected.connect(self.reject)
self.ui.preview.clicked.connect(self.preview)
self.ui.files.setHeaderLabels([_("File Name")])
self.files = files
self.items = []
for file in files:
item = QtWidgets.QTreeWidgetItem(self.ui.files)
item.setText(0, os.path.basename(file.filename))
self.items.append(item)
self._tag_re = re.compile(r"(%\w+%)")
self.numeric_tags = ('tracknumber', 'totaltracks', 'discnumber', 'totaldiscs')
def parse_response(self):
tff_format = self.ui.format.currentText()
columns = []
format_re = ['(?:^|/)']
for part in self._tag_re.split(tff_format):
if part.startswith('%') and part.endswith('%'):
name = part[1:-1]
columns.append(name)
if name in self.numeric_tags:
format_re.append('(?P<' + name + r'>\d+)')
                elif name == 'date':
format_re.append('(?P<' + name + r'>\d+(?:-\d+(?:-\d+)?)?)')
else:
format_re.append('(?P<' + name + '>[^/]*?)')
else:
format_re.append(re.escape(part))
format_re.append(r'\.(\w+)$')
format_re = re.compile("".join(format_re))
return format_re, columns
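    # For illustration (comment added, not in the original code): a format of
    # "%artist%/%album%/%title%" is compiled by parse_response into roughly
    #     (?:^|/)(?P<artist>[^/]*?)/(?P<album>[^/]*?)/(?P<title>[^/]*?)\.(\w+)$
    # which match_file below then applies to each file's path.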
def match_file(self, file, tff_format):
match = tff_format.search(file.filename.replace('\\','/'))
if match:
result = {}
for name, value in match.groupdict().items():
value = value.strip()
if name in self.numeric_tags:
value = value.lstrip("0")
if self.ui.replace_underscores.isChecked():
value = value.replace('_', ' ')
result[name] = value
return result
else:
return {}
def preview(self):
tff_format, columns = self.parse_response()
self.ui.files.setHeaderLabels([_("File Name")] + list(map(display_tag_name, columns)))
for item, file in zip(self.items, self.files):
matches = self.match_file(file, tff_format)
for i, column in enumerate(columns):
item.setText(i + 1, matches.get(column, ''))
self.ui.files.header().resizeSections(QtWidgets.QHeaderView.ResizeToContents)
self.ui.files.header().setStretchLastSection(True)
def accept(self):
tff_format, columns = self.parse_response()
for file in self.files:
metadata = self.match_file(file, tff_format)
for name, value in metadata.items():
file.metadata[name] = value
file.update()
config.persist["tags_from_filenames_format"] = self.ui.format.currentText()
super().accept()
|
gpl-2.0
| 7,357,525,650,345,461,000
| 40.523438
| 113
| 0.598307
| false
| 3.862645
| false
| false
| false
|
jplusplus/thenmap-v0
|
generators/utils/guess-nation-codes.py
|
1
|
2869
|
# coding=utf-8
#Try and create nation codes (class names) from nation names in a csv
import csv
import argparse
import os.path
import sys
import shlex
#Check if file exists
def is_valid_file(parser, arg):
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return arg
#Define command line arguments
parser = argparse.ArgumentParser(description='Try and create Thenmap nation codes (typically two letter iso codes) from nation names in a csv.')
#Input file
parser.add_argument("-i", "--input", dest="infile", required=True,
help="input file", metavar="FILE",
type=lambda x: is_valid_file(parser,x))
#Output file
parser.add_argument("-o", "--output", dest="outfile",
help="output file", metavar="FILE")
#Column
parser.add_argument("-c", "--column", dest="column",
help="column to search and replace, starting from 0", type=int, default=0)
args = parser.parse_args()
inputFile = args.infile #"/home/leo/Världen/demo/patents/raw-pre.csv"
if args.outfile is None:
print "No output file given. Really overwrite input file? [y/N]"
choice = raw_input().lower()
if not choice in ('y', 'yes'):
sys.exit()
outputFile = inputFile
else:
if os.path.isfile(args.outfile):
print "File %s already exists. Overwrite? [y/N]" % args.outfile
choice = raw_input().lower()
if not choice in ('y', 'yes'):
sys.exit()
outputFile = args.outfile
indataColumn = args.column
outdataColumn = indataColumn
keyDict = {}
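# Expected layout of nation-keys.csv as read below: code, Swedish name,
# English name, comma-separated aliases, ISO alpha-3, OECD label. A row might
# look like (values are hypothetical, added for illustration):
#   ch,Schweiz,Switzerland,"Swiss Confederation",CHE,"CHE: Switzerland"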
try:
with open('nation-keys.csv', 'rb') as csvfile:
keyreader = csv.reader(csvfile,delimiter=',',quotechar='"')
for row in keyreader:
#Swedish name -> code
if row[1]:
keyDict[row[1]] = row[0]
#English name -> code
if row[2]:
keyDict[row[2]] = row[0]
#Alisases -> code
if row[3]:
#Use csv module to split string by comma, respecting quotes
aliases = csv.reader([row[3]],skipinitialspace=True)
for a in aliases.next():
keyDict[a] = row[0]
#ISO alpha 3 ("CHE")
if row[4]:
keyDict[row[4]] = row[0]
#OECD ("CHE: Switzerland")
if row[5]:
keyDict[row[5]] = row[0]
except IOError:
print ("Could not open key file")
#print keyDict
outdata = []
try:
with open(inputFile, 'rb') as csvfile:
datacsv = csv.reader(csvfile,delimiter=',',quotechar='"')
firstRow = True
for row in datacsv:
if firstRow:
firstRow = False
else:
nationname = row[indataColumn].strip()
if nationname in keyDict:
row[outdataColumn] = keyDict[nationname]
else:
print "Could not find %s" % nationname
outdata.append(row)
try:
with open(outputFile, 'wb') as csvfile:
writer = csv.writer(csvfile,delimiter=',',quotechar='"')
for row in outdata:
writer.writerow(row)
except IOError:
print ("Could not open output file")
except IOError:
print ("Could not open input file")
|
gpl-2.0
| -8,708,814,281,626,982,000
| 24.837838
| 144
| 0.664226
| false
| 3.028511
| false
| false
| false
|
tensorflow/lingvo
|
lingvo/core/task_scheduler.py
|
1
|
11531
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-task task sampling schedules."""
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import early_stop
import numpy as np
class TaskScheduler(base_layer.BaseLayer):
"""Generic multi-task scheduler.
Subclasses should override the `Sample` method to return a task string given
a step. All of the task strings as well as additional hyperparameters needed
by `Sample` should be exposed and stored in the params. `Sample` should also
update `cur_probs`.
"""
@classmethod
def Params(cls):
"""Parameters for this task scheduler."""
p = super().Params()
p.name = 'task_scheduler'
return p
def __init__(self, params):
super().__init__(params)
self.cur_probs = None
self.SetVariableFree()
def Sample(self, current_step):
raise NotImplementedError('Abstract method')
class AdaptiveScheduler(TaskScheduler):
"""Tasks with low scores will be sampled more often.
Scores are expected to be non-negative. Larger scores are better."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('tasks', [], 'List of tasks')
p.Define('expected', [], 'List of final expected scores')
p.Define('mh_a', early_stop.MetricHistory.Params(), '')
p.Define('mh_b', early_stop.MetricHistory.Params(), '')
p.Define(
        'epsilon', 0.05, 'Regularization term. A large epsilon will lead '
        'to a more uniform task distribution.')
p.Define('alpha', 1.0, 'Normalized task scores are raised to this power.')
return p
def __init__(self, params):
super().__init__(params)
if len(self.params.tasks) != 2 or len(self.params.expected) != 2:
raise ValueError('Only two tasks are supported by this scheduler.')
if self.params.epsilon < 0:
raise ValueError('Epsilon should be positive.')
self.tasks = self.params.tasks
self.last_scores = [0.0] * 2
self._metric_histories = [
early_stop.MetricHistory(self.params.mh_a),
early_stop.MetricHistory(self.params.mh_b)
]
def getMetricHistories(self):
# If too slow, consider another implementation.
# TODO(sebjean) Time file reading and change behaviour if too long.
for index, mh in enumerate(self._metric_histories):
      score, lines = 0.0, []  # defaults, in case the history file is empty
      try:
with tf.io.gfile.GFile(mh.hist_file) as f:
lines = f.readlines()
except tf.errors.NotFoundError:
tf.logging.warning('File not found. '
'Expected at start of training only.')
score, lines = 0.0, []
if lines:
try:
score = lines[-1].split()[-1]
except IndexError:
tf.logging.warning(
'IndexError. Your history file may be corrupted.')
score = 0.0
self.last_scores[index] = float(score)
class SimpleAdaptiveScheduler(AdaptiveScheduler):
"""Simple adaptive scheduler.
A task with a normalized score of `s` is approximately weighted as `1 - s`.
"""
def Sample(self, current_step):
"""Sample a task.
    The unnormalized probability of a task is given by
1 + epsilon - min(1, score / expected)**alpha.
Args:
current_step: Unused.
Returns:
str, the name of the sampled task.
"""
del current_step # Unused
self.getMetricHistories()
alpha, eps = self.params.alpha, self.params.epsilon
probs = [
1 + eps - min(1, score / self.params.expected[index])**alpha
for index, score in enumerate(self.last_scores)
]
probs = tuple(probs / np.sum(probs))
sampled_task = np.random.choice(self.params.tasks, p=probs)
self.cur_probs = probs
return sampled_task
class InverseRatioAdaptiveScheduler(AdaptiveScheduler):
"""Inverse ratio adaptive scheduler.
Tasks are approximately weighed as the inverse of their normalized scores.
"""
def Sample(self, current_step):
"""Sample a task.
    The unnormalized probability of a task is given by
1 / (min(1, score / expected)**alpha + epsilon)
Args:
current_step: Unused.
Returns:
str, the name of the sampled task.
"""
del current_step # Unused
self.getMetricHistories()
alpha, eps = self.params.alpha, self.params.epsilon
probs = [
1.0 / (min(1, score / self.params.expected[index])**alpha + eps)
for index, score in enumerate(self.last_scores)
]
probs = tuple(probs / np.sum(probs))
sampled_task = np.random.choice(self.params.tasks, p=probs)
self.cur_probs = probs
return sampled_task
class ShiftedExponentialScheduler(TaskScheduler):
"""The unnormalized score of each task follows a shifted exponential function.
Generalizes the constant, exponential and sigmoid
schedules described in "Scheduled Multi-Task Learning: From Syntax to
Translation" (Kiperwasser and Ballesteros).
https://arxiv.org/pdf/1804.08915.pdf
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'alpha', 0, 'Controls the rate at which the schedule changes. '
'A large alpha will lead to fast convergence toward final values.')
p.Define(
        'task_probs', [], 'List of 2-tuples (task, prob). For non-constant '
'schedulers, prob is a tuple of the form (init_prob, final_prob).')
return p
def __init__(self, params):
super().__init__(params)
assert isinstance(self.params.task_probs, list)
self.tasks = []
self._descriptors = []
def Sample(self, current_step):
"""Sample a task.
Given an input [a, b] and a rate `alpha`, the unnormalized
    score of each task is a + b * exp(-alpha * t).
Args:
current_step: int. Current time step.
Returns:
str, the name of the sampled task.
"""
probs = [
a + b * np.exp(-self.params.alpha * current_step)
for a, b in self._descriptors
]
probs = tuple(probs / np.sum(probs))
sampled_task = np.random.choice(self.tasks, p=probs)
self.cur_probs = probs
return sampled_task
class ConstantScheduler(ShiftedExponentialScheduler):
"""Constant schedule. Tasks are sampled from a fixed probability distribution.
"""
def __init__(self, params):
super().__init__(params)
for key, value in self.params.task_probs:
self.tasks.append(key)
self._descriptors.append((value, 0))
class ExponentialScheduler(ShiftedExponentialScheduler):
"""Exponential schedule.
For a task with initial and final probabilities p_0 and p_1 respectively,
its unnormalized score is given by
`p_1 + (p_0 - p_1) * exp(-alpha * current_step)`.
"""
def __init__(self, params):
super().__init__(params)
for key, value in self.params.task_probs:
self.tasks.append(key)
self._descriptors.append((value[1], value[0] - value[1]))
class SigmoidScheduler(ShiftedExponentialScheduler):
"""Sigmoid schedule.
For a task with initial and final probabilities p_0 and p_1 respectively,
its unnormalized score is given by
`p_1 + (2 * p_0 - p_1) * exp(-alpha * current_step)`.
"""
def __init__(self, params):
super().__init__(params)
for key, value in self.params.task_probs:
self.tasks.append(key)
self._descriptors.append((value[1], 2 * value[0] - value[1]))
class RoundRobinScheduler(TaskScheduler):
"""Deterministic sequential schedule."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('tasks', [], 'List of task names. No repetitions allowed.')
return p
def __init__(self, params):
super().__init__(params)
assert isinstance(self.params.tasks, list)
self.tasks = sorted(self.params.tasks)
self.n_tasks = len(self.tasks)
self.cur_probs = [1. / self.n_tasks] * self.n_tasks # For summary
self.next_task_idx = 0
def Sample(self, current_step):
"""Sample a task."""
sampled_task = self.tasks[self.next_task_idx]
self.next_task_idx = (self.next_task_idx + 1) % self.n_tasks
return sampled_task
class SequentialScheduler(TaskScheduler):
"""Deterministic schedule that stays a fixed number of steps on each task."""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'task_steps', [], 'List of tuples of (task_name, steps_for_task). Goes '
'through list sequentially in the specified order, staying '
'steps_for_task steps on task_name. On completing the schedule, '
'remains on the final task for the rest of the time. Assumes '
'p.task_global_step is False.')
return p
def __init__(self, params):
super().__init__(params)
assert isinstance(self.params.task_steps, list)
assert self.params.task_steps
self.task_steps = []
for (name, steps) in self.params.task_steps:
assert steps > 0
if self.task_steps:
self.task_steps.append((name, steps + self.task_steps[-1][1]))
else:
self.task_steps.append((name, steps))
self.n_tasks = len(self.task_steps)
self.task_idx = 0
self.cur_probs = [1] + [0] * (self.n_tasks - 1) # For summary
def Sample(self, current_step):
"""Sample a task."""
sampled_task, to_step = self.task_steps[self.task_idx]
if current_step >= to_step and self.task_idx < self.n_tasks - 1:
self.task_idx += 1
sampled_task = self.task_steps[self.task_idx][0]
self.cur_probs[self.task_idx - 1] = 0
self.cur_probs[self.task_idx] = 1
return sampled_task
class PieceWiseScheduler(TaskScheduler):
"""Piecewise scheduler using different scheduling strategies."""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'schedule_steps', [], 'List of tuples of (schedule_class_params, '
'number of steps to use this schedule class)')
return p
def __init__(self, params):
super().__init__(params)
assert isinstance(self.params.schedule_steps, list)
self.schedule_steps = []
self.schedule_params = []
for (cls_params, steps) in self.params.schedule_steps:
if self.schedule_steps:
self.schedule_steps.append(steps + self.schedule_steps[-1])
else:
self.schedule_steps.append(steps)
self.schedule_params.append(cls_params)
self.CreateChildren('schedules', self.schedule_params)
self.n_schedules = len(self.schedule_steps)
self.schedule_idx = 0
self.task_step_offset = 0
self.cur_probs = self.schedules[0].cur_probs
def Sample(self, current_step):
"""Sample a task."""
to_step = self.schedule_steps[self.schedule_idx]
if current_step >= to_step and self.schedule_idx < self.n_schedules - 1:
self.task_step_offset = to_step
self.schedule_idx += 1
cur_schedule = self.schedules[self.schedule_idx]
sampled_task = cur_schedule.Sample(current_step - self.task_step_offset)
self.cur_probs = cur_schedule.cur_probs
return sampled_task
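# Illustrative usage sketch (comments only, not part of the original module).
# Under the usual lingvo Params flow, a constant 80/20 schedule over two tasks
# could be configured roughly as follows (task names are assumptions):
#
#   p = ConstantScheduler.Params()
#   p.task_probs = [('mt', 0.8), ('asr', 0.2)]
#   scheduler = p.Instantiate()
#   task = scheduler.Sample(current_step=0)  # returns 'mt' or 'asr'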
|
apache-2.0
| -3,984,654,347,267,109,000
| 30.164865
| 80
| 0.652155
| false
| 3.711297
| false
| false
| false
|
tungvx/deploy
|
Django-0.90/tests/testapp/models/m2o_recursive.py
|
1
|
1066
|
"""
11. Relating an object to itself, many-to-one
To define a many-to-one relationship between a model and itself, use
``ForeignKey('self')``.
In this example, a ``Category`` is related to itself. That is, each
``Category`` has a parent ``Category``.
Set ``related_name`` to designate what the reverse relationship is called.
"""
from django.core import meta
class Category(meta.Model):
name = meta.CharField(maxlength=20)
parent = meta.ForeignKey('self', null=True, related_name='child')
class META:
module_name = 'categories'
def __repr__(self):
return self.name
API_TESTS = """
# Create a few Category objects.
>>> r = categories.Category(id=None, name='Root category', parent=None)
>>> r.save()
>>> c = categories.Category(id=None, name='Child category', parent=r)
>>> c.save()
>>> r.get_child_list()
[Child category]
>>> r.get_child(name__startswith='Child')
Child category
>>> r.get_parent()
Traceback (most recent call last):
...
CategoryDoesNotExist
>>> c.get_child_list()
[]
>>> c.get_parent()
Root category
"""
|
apache-2.0
| 3,638,889,243,003,648,500
| 23.227273
| 74
| 0.67167
| false
| 3.405751
| false
| false
| false
|
cadappl/krep
|
krep_subcmds/help_subcmd.py
|
1
|
2299
|
from topics import key_compare, SubCommand
class HelpSubcmd(SubCommand):
COMMAND = 'help'
help_summary = 'Print the command summaries'
help_usage = '''\
%prog <subcmd> ...
Display the detailed usage of the sub-command or the list of all supported
sub-commands.
Environment variables KREP_EXTRA_PATH and KREP_SUBCMD_PATH could define new
external sub-commands. Try to define the variables if required.
The argument "all" indicates that all sub-commands should be listed implicitly.'''
def _print_all_commands(self):
print('Usage: krep subcmd [args] ...')
print('The commands of krep are:')
print('')
lines = list()
for name, cmd in self.commands.items(): # pylint: disable=E1101
try:
summary = cmd.help_summary.strip()
except AttributeError:
summary = 'No Summary'
if name in getattr(cmd, 'ALIASES', list()):
summary = 'Alias of "%s"' % getattr(cmd, 'COMMAND', cmd.NAME)
lines.append(' %-15s%s' % (name, summary))
def sort_help(linea, lineb):
def _is_help_command(line):
return line.lstrip().startswith('help')
if _is_help_command(linea):
return -1
elif _is_help_command(lineb):
return 1
return (linea > lineb) - (linea < lineb) # cmp(linea, lineb)
# put help command on the top
lines.sort(key=key_compare(sort_help))
print('\n'.join(lines))
print('\nSee more info with "krep help <command>"')
def _print_command(self, command):
if command not in self.commands: # pylint: disable=E1101
print('krep: "%s" is not a known command' % command)
else:
try:
cmd = self.commands[command] # pylint: disable=E1101
help_usage = cmd.help_usage
except AttributeError:
help_usage = 'Failed to read the command help.'
print(help_usage.replace('%prog', 'krep %s' % command))
def execute(self, options, *args): # pylint: disable=W0613
if len(args) == 0 or 'all' in args:
self._print_all_commands()
else:
for arg in args:
self._print_command(arg)
|
lgpl-3.0
| -4,619,710,783,916,100,000
| 32.318841
| 77
| 0.570248
| false
| 3.936644
| false
| false
| false
|
kevinkahn/softconsole
|
screens/screen.py
|
1
|
16940
|
import collections
import pygame
import functools
from guicore.switcher import SwitchScreen
import config
import hubs.hubs
import logsupport
import screens.__screens as screens
import stores.paramstore as paramstore
import stores.valuestore as valuestore
from keyspecs import toucharea
from logsupport import ConsoleError, ConsoleWarning, ConsoleDetail
from utils.utilfuncs import wc, tint, fmt
from utils import timers, utilities, fonts, displayupdate, hw
ScreenParams = {'DimTO': 99,
'CharColor': "white",
'PersistTO': 20,
'BackgroundColor': 'maroon',
'CmdKeyCol': "red",
'CmdCharCol': "white",
'DefaultHub': '',
'KeyColor': "aqua",
'KeyColorOn': "",
'KeyColorOff': "",
'KeyCharColorOn': "white",
'KeyCharColorOff': "black",
'KeyOnOutlineColor': "white",
'KeyOffOutlineColor': "black",
'KeyOutlineOffset': 3,
'KeyLabelOn': ['', ],
'KeyLabelOff': ['', ],
'ScreenTitleColor': "white",
'ScreenTitleSize': 50,
'ScreenTitle': '',
'ScreenTitleFields': ['', ],
'HorizBorder': 20,
'TopBorder': 20,
'BotBorder': 60,
'BotBorderWONav': 20,
'HorizButtonGap': 0,
'VertButGap': 0,
'NavKeyHeight': 60,
'HorizButGap': 0,
'NavKeyWidth': 60
}
screenStore = valuestore.NewValueStore(paramstore.ParamStore('ScreenParams'))
BACKTOKEN = None
HOMETOKEN = None
SELFTOKEN = None
def CommonClockTick(params):
# noinspection PyProtectedMember
config.AS._ClockTick(params)
# noinspection PyProtectedMember
def CommonClockTickValid():
return config.AS._ClockTickValid()
StdScreenClock = 1
ScreenClocks = {StdScreenClock: timers.RepeatingPost(StdScreenClock, paused=True, start=True, name='StdScreenClock',
proc=CommonClockTick, eventvalid=CommonClockTickValid)}
RunningScreenClock = ScreenClocks[StdScreenClock]
def InitScreenParams(parseconfig):
screens.screenStore = screenStore
for p, v in ScreenParams.items():
screenStore.SetVal(p, type(v)(parseconfig.get(p, v)))
def GoToScreen(NS, newstate='NonHome'):
SwitchScreen(NS, 'Bright', 'Go to Screen', newstate=newstate)
def PushToScreen(NS, newstate='NonHome', msg='Push to Screen'):
SwitchScreen(NS, 'Bright', msg, newstate=newstate, push=True)
def PopScreen(msg='PopScreen', newstate='Maint'):
SwitchScreen(BACKTOKEN, 'Bright', msg, newstate=newstate)
def IncorporateParams(this, clsnm, theseparams, screensection):
if screensection is None: screensection = {}
for p in theseparams:
if isinstance(theseparams, dict):
if theseparams[p] is not None: this.userstore.SetVal(p, theseparams[p]) # a value was set in config file
else:
if p in screensection:
# this.userstore.SetVal(p, type(ScreenParams[p])(screensection.get(p, ""))) # string only safe default
this.userstore.SetVal(p, type(ScreenParams[p])(screensection.get(p, ScreenParams[p])))
def AddUndefaultedParams(this, screensection, **kwargs):
if screensection is None: screensection = {}
for n, v in kwargs.items():
if n in this.__dict__: del this.__dict__[n] # remove if it was declared statically
this.userstore.SetVal(n, type(v)(screensection.get(n, v)))
def FlatenScreenLabel(label):
scrlabel = label[0]
for s in label[1:]:
scrlabel = scrlabel + " " + s
return scrlabel
def ButLayout(butcount):
# 1 2 3 4 5 6 7 8 9 10
plan = ((1, 1), (1, 2), (1, 3), (2, 2), (1, 5), (2, 3), (2, 4), (2, 4), (3, 3), (4, 3),
# 11 12 13 14 15 16 17 18 19 20
(4, 3), (4, 3), (4, 4), (4, 4), (4, 4), (4, 4), (5, 4), (5, 4), (5, 4), (5, 4))
if butcount in range(1, 21):
return plan[butcount - 1]
else:
logsupport.Logs.Log("Button layout error - too many or no buttons: {}".format(butcount), severity=ConsoleError)
return 5, 5
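# Worked example for ButLayout (comment added for illustration): ButLayout(7)
# returns plan[6] == (2, 4), i.e. 2 buttons per row and 4 per column (8 slots
# for 7 keys), which LayoutKeys below unpacks as (buttonsperrow, buttonspercol).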
class ScreenDesc(object):
"""
Basic information about a screen, subclassed by all other screens to handle this information
"""
def __setattr__(self, key, value):
if key not in ScreenParams:
object.__setattr__(self, key, value)
else:
self.userstore.SetVal(key, value)
# object.__setattr__(self, key, value)
def __getattr__(self, key):
return self.userstore.GetVal(key)
def __init__(self, screensection, screenname, parentscreen=None, SingleUse=False, Type='unset'):
self.userstore = paramstore.ParamStore('Screen-' + screenname,
dp=screenStore if parentscreen is None else parentscreen.userstore,
locname=screenname)
		# todo add routine to update allowable mods per screen - but rationalize with incorporated parameters from higher-level screens
self.ScreenType = Type
self.markradius = int(min(hw.screenwidth, hw.screenheight) * .025)
self.name = screenname
self.singleuse = SingleUse
self.used = False
self.WatchMotion = False
self.Active = False # true if actually on screen
self.ChildScreens = {} # used to do cascaded deleted if this screen is deleted. Only list one-off dependents
self.DefaultNavKeysShowing = True
self.NavKeysShowing = True
self.NavKeys = collections.OrderedDict()
self.Keys = collections.OrderedDict()
self.WithNav = True
self.useablevertspace = hw.screenheight - self.TopBorder - self.BotBorder
self.useablevertspacesansnav = hw.screenheight - self.TopBorder - self.BotBorderWONav
self.useablehorizspace = hw.screenwidth - 2 * self.HorizBorder
self.startvertspace = self.TopBorder
self.starthorizspace = self.HorizBorder
self.HubInterestList = {} # one entry per hub, each entry is a dict mapping addr to Node
self.ScreenTitleBlk = None
self.ScreenTitle = ''
self.prevkey = None
self.nextkey = None
self.NavKeyWidth = (hw.screenwidth - 2 * self.HorizBorder) // 2
cvertcenter = hw.screenheight - self.BotBorder / 2
self.homekey = toucharea.ManualKeyDesc(self, 'Back<' + 'Home', ('Home',),
self.CmdKeyCol, self.CmdCharCol, self.CmdCharCol,
proc=functools.partial(GoToScreen, HOMETOKEN),
center=(
self.starthorizspace + .5 * self.NavKeyWidth,
cvertcenter),
size=(self.NavKeyWidth, self.NavKeyHeight), gaps=True)
self.backkey = toucharea.ManualKeyDesc(self, 'Nav>' + 'Back', ('Back',),
self.CmdKeyCol, self.CmdCharCol, self.CmdCharCol,
proc=functools.partial(GoToScreen, BACKTOKEN),
center=(
self.starthorizspace + 1.5 * self.NavKeyWidth,
cvertcenter),
size=(self.NavKeyWidth, self.NavKeyHeight), gaps=True)
IncorporateParams(self, 'Screen',
{'CharColor', 'DimTO', 'PersistTO', 'BackgroundColor', 'CmdKeyCol', 'CmdCharCol',
'DefaultHub', 'ScreenTitle', 'ScreenTitleColor', 'ScreenTitleFields', 'ScreenTitleSize', 'KeyCharColorOn',
'KeyCharColorOff', 'KeyColor'}, screensection)
AddUndefaultedParams(self, screensection, label=[screenname])
try:
self.DefaultHubObj = hubs.hubs.Hubs[self.DefaultHub]
except KeyError:
self.DefaultHubObj = None # todo test what happens later or force this to be an exiting error
logsupport.Logs.Log("Bad default hub name for screen: ", screenname, severity=ConsoleError)
raise ValueError
self.DecodedScreenTitleFields = []
for f in self.ScreenTitleFields:
if ':' in f:
self.DecodedScreenTitleFields.append(f.split(':'))
# todo compute the vertical space issue for the title if non-null; generate and horiz space the title later
if self.ScreenTitle != '':
# adjust space for a title
tempblk, _ = self._GenerateTitleBlk(self.ScreenTitle, self.DecodedScreenTitleFields, self.ScreenTitleColor)
h = tempblk.get_height()
titlegap = h // 10 # todo is this the best way to space?
self.startvertspace = self.startvertspace + h + titlegap
self.useablevertspace = self.useablevertspace - h - titlegap
self.useablevertspacesansnav = self.useablevertspacesansnav - h - titlegap
self.ScreenTitleBlk = tempblk
self.ScreenClock = ScreenClocks[StdScreenClock]
self.ScreenTimers = [] # (Timer, Cancel Proc or None) Don't put ScreenCLock in the list or it gets canceled
utilities.register_example('ScreenDesc', self)
def _GenerateTitleBlk(self, title, fields, color):
vals = ['--' if v is None else v for v in [valuestore.GetVal(f) for f in fields]]
formattedTitle = fmt.format(title, *vals)
blk = fonts.fonts.Font(self.ScreenTitleSize, bold=True).render(formattedTitle, 0, wc(color))
w = blk.get_width()
return blk, w
def _ClockTickValid(self):
if not self.Active: print('Clock not valid {}'.format(self.name))
return self.Active
# noinspection PyUnusedLocal
def _ClockTick(self, params):
if not self.Active: return # avoid race with timer and screen exit
self.ClockTick()
def ClockTick(self): # this is meant to be overridden if screen want more complex operation
self.ReInitDisplay()
def SetScreenClock(self, interval):
# for screens with a non standard clocking rate
if interval in ScreenClocks:
self.ScreenClock = ScreenClocks[interval]
else:
self.ScreenClock = timers.RepeatingPost(interval, paused=True, start=True,
name='ScreenClock-' + str(interval),
proc=CommonClockTick, eventvalid=CommonClockTickValid)
ScreenClocks[interval] = self.ScreenClock
def CreateNavKeys(self, prevk, nextk):
cvertcenter = hw.screenheight - self.BotBorder / 2
self.prevkey = toucharea.ManualKeyDesc(self, 'Nav<' + prevk.name,
prevk.label,
prevk.CmdKeyCol, prevk.CmdCharCol,
prevk.CmdCharCol,
proc=functools.partial(GoToScreen, prevk),
center=(
self.starthorizspace + .5 * (
self.NavKeyWidth),
cvertcenter),
size=(self.NavKeyWidth, self.NavKeyHeight), gaps=True)
self.nextkey = toucharea.ManualKeyDesc(self, 'Nav>' + nextk.name,
nextk.label,
nextk.CmdKeyCol, nextk.CmdCharCol,
nextk.CmdCharCol,
proc=functools.partial(GoToScreen, nextk),
center=(
self.starthorizspace + 1.5 * (
self.NavKeyWidth),
cvertcenter),
size=(self.NavKeyWidth, self.NavKeyHeight), gaps=True)
def ClearScreenTitle(self):
if self.ScreenTitleBlk is None: return
h = self.ScreenTitleBlk.get_height()
self.ScreenTitleBlk = None
self.ScreenTitle = ''
titlegap = h // 10
self.startvertspace = self.startvertspace - h - titlegap
self.useablevertspace = self.useablevertspace + h + titlegap
self.useablevertspacesansnav = self.useablevertspacesansnav + h + titlegap
def SetScreenTitle(self, name, fontsz, color, force=False):
if self.ScreenTitleBlk is not None and not force:
return # User explicitly set a title so don't override it
self.ClearScreenTitle()
self.ScreenTitle = name
self.ScreenTitleBlk = fonts.fonts.Font(fontsz).render(name, 0, wc(color))
h = self.ScreenTitleBlk.get_height()
titlegap = h // 10 # todo is this the best way to space? if fix - fix clear also
self.startvertspace = self.startvertspace + h + titlegap
self.useablevertspace = self.useablevertspace - h - titlegap
self.useablevertspacesansnav = self.useablevertspacesansnav - h - titlegap
def ButSize(self, bpr, bpc, height):
h = self.useablevertspace if height == 0 else height
return (
self.useablehorizspace / bpr, h / bpc)
def PaintNavKeys(self):
for key in self.NavKeys.values():
key.PaintKey()
def PaintKeys(self):
if self.Keys is not None:
for key in self.Keys.values():
if type(key) is not toucharea.TouchPoint:
key.PaintKey()
self.PaintNavKeys()
def ScreenContentRepaint(self):
pass
def AddToHubInterestList(self, hub, item, value):
if hub.name in self.HubInterestList:
self.HubInterestList[hub.name][item] = value
else:
self.HubInterestList[hub.name] = {item: value}
def _PrepScreen(self, nav=None, init=True):
global RunningScreenClock
if self.used:
logsupport.Logs.Log('Attempted reuse (Init: {}) of single use screen {}'.format(init, self.name),
severity=ConsoleError)
if init:
if self.ScreenClock != RunningScreenClock:
RunningScreenClock.pause()
self.ScreenClock.resume()
RunningScreenClock = self.ScreenClock
else:
RunningScreenClock.resume()
if init: self.NavKeys = nav
self.PaintBase()
self.PaintKeys()
if self.ScreenTitleBlk is not None:
self.ScreenTitleBlk, w = self._GenerateTitleBlk(self.ScreenTitle, self.DecodedScreenTitleFields,
self.ScreenTitleColor)
hw.screen.blit(self.ScreenTitleBlk,
(self.starthorizspace + (self.useablehorizspace - w) // 2, self.TopBorder))
self.ScreenContentRepaint()
displayupdate.updatedisplay()
def InitDisplay(self, nav):
self._PrepScreen(nav, True)
def ReInitDisplay(self):
self._PrepScreen(None, False)
def NodeEvent(self, evnt):
if evnt.node is not None:
if evnt.hub != '*VARSTORE*': # var changes can be reported while any screen is up
logsupport.Logs.Log("Unexpected event to screen: ", self.name, ' Hub: ', str(evnt.hub), ' Node: ',
str(evnt.node),
' Val: ', str(evnt.value), severity=ConsoleDetail)
else:
pass
else:
logsupport.Logs.Log(
'Node event to screen {} with no handler node: {} (Event: {})'.format(self.name, evnt.node, evnt),
severity=ConsoleWarning, hb=True, tb=True)
def VarEvent(self, evnt):
pass # var changes can happen with any screen up so if screen doesn't care about vars it doesn't define a handler
# logsupport.Logs.Log('Var event to screen {} with no handler (Event: {})'.format(self.name,evnt), severity=ConsoleError)
def ExitScreen(self, viaPush):
if self.ScreenClock is not None: self.ScreenClock.pause()
for timer in self.ScreenTimers:
if timer[0].is_alive():
timer[0].cancel()
if timer[1] is not None: timer[1]()
self.ScreenTimers = []
if self.singleuse:
if viaPush:
pass
else:
self.userstore.DropStore()
for nm, k in self.Keys.items():
if hasattr(k, 'userstore'): k.userstore.DropStore()
for nm, k in self.NavKeys.items():
if hasattr(k, 'userstore'): k.userstore.DropStore()
self.used = True
def DeleteScreen(self):
# explicit screen destroy
self.userstore.DropStore()
for timer in self.ScreenTimers:
if timer[0].is_alive():
timer[0].cancel()
if timer[1] is not None: timer[1]()
for n, s in self.ChildScreens.items():
s.DeleteScreen()
def PopOver(self):
try:
if self.singleuse:
self.userstore.DropStore()
for nm, k in self.Keys.items():
k.userstore.DropStore()
for nm, k in self.NavKeys.items():
k.userstore.DropStore()
self.used = True
except Exception as E:
logsupport.Logs.Log('Screen sequencing exception for screen {}: {}'.format(self.name, repr(E)),
severity=ConsoleWarning)
def PaintBase(self):
hw.screen.fill(wc(self.BackgroundColor))
lineclr = tint(self.BackgroundColor, tint_factor=.5)
if config.sysStore.NetErrorIndicator:
pygame.draw.circle(hw.screen, tint(self.BackgroundColor, tint_factor=.5),
(self.markradius, self.markradius), self.markradius, 0)
lineclr = wc(self.BackgroundColor)
if config.sysStore.ErrorNotice != -1:
pygame.draw.line(hw.screen, lineclr, (0, self.markradius), (2 * self.markradius, self.markradius), 3)
pygame.draw.line(hw.screen, lineclr, (self.markradius, 0), (self.markradius, 2 * self.markradius), 3)
class BaseKeyScreenDesc(ScreenDesc):
def __init__(self, screensection, screenname, parentscreen=None, SingleUse=False):
super().__init__(screensection, screenname, parentscreen=parentscreen, SingleUse=SingleUse)
AddUndefaultedParams(self, screensection, KeysPerColumn=0, KeysPerRow=0)
self.buttonsperrow = -1
self.buttonspercol = -1
utilities.register_example('BaseKeyScreenDesc', self)
def LayoutKeys(self, extraOffset=0, height=0):
# Compute the positions and sizes for the Keys and store in the Key objects
explicitlayout = self.KeysPerColumn * self.KeysPerRow
if explicitlayout != 0:
# user provided explicit button layout
if explicitlayout >= len(self.Keys):
# user layout provides enough space
bpr, bpc = (self.KeysPerRow, self.KeysPerColumn)
else:
# bad user layout - go with automatic
logsupport.Logs.Log('Bad explicit key layout for: ', self.name, severity=ConsoleWarning)
bpr, bpc = ButLayout(len(self.Keys))
else:
bpr, bpc = ButLayout(
len(self.Keys)) # don't do this if explicit layout spec's because may be more keys than it can handle
self.buttonsperrow = bpr
self.buttonspercol = bpc
buttonsize = self.ButSize(bpr, bpc, height)
hpos = []
vpos = []
for i in range(bpr):
hpos.append(self.starthorizspace + (.5 + i) * buttonsize[0])
for i in range(bpc):
vpos.append(self.startvertspace + extraOffset + (.5 + i) * buttonsize[1])
for i, (kn, key) in enumerate(self.Keys.items()):
key.FinishKey((hpos[i % bpr], vpos[i // bpr]), buttonsize)
|
apache-2.0
| -8,380,265,676,621,377,000
| 35.119403
| 122
| 0.689906
| false
| 3.105408
| true
| false
| false
|
wdv4758h/ZipPy
|
edu.uci.python.benchmark/src/benchmarks/sympy/sympy/integrals/manualintegrate.py
|
2
|
39796
|
"""Integration method that emulates by-hand techniques.
This module also provides functionality to get the steps used to evaluate a
particular integral, in the ``integral_steps`` function. This will return
nested namedtuples representing the integration rules used. The
``manualintegrate`` function computes the integral using those steps given
an integrand; given the steps, ``_manualintegrate`` will evaluate them.
The integrator can be extended with new heuristics and evaluation
techniques. To do so, write a function that accepts an ``IntegralInfo``
object and returns either a namedtuple representing a rule or
``None``. Then, write another function that accepts the namedtuple's fields
and returns the antiderivative, and decorate it with
``@evaluates(namedtuple_type)``. If the new technique requires a new
match, add the key and call to the antiderivative function to integral_steps.
To enable simple substitutions, add the match to find_substitutions.
"""
from __future__ import print_function, division
from collections import namedtuple
import sympy
from sympy.core.compatibility import reduce
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.strategies.core import (switch, identity, do_one, null_safe,
condition, tryit)
def Rule(name, props=""):
# GOTCHA: namedtuple class name not considered!
def __eq__(self, other):
return self.__class__ == other.__class__ and tuple.__eq__(self, other)
__neq__ = lambda self, other: not __eq__(self, other)
cls = namedtuple(name, props + " context symbol")
cls.__eq__ = __eq__
cls.__ne__ = __neq__
return cls
ConstantRule = Rule("ConstantRule", "constant")
ConstantTimesRule = Rule("ConstantTimesRule", "constant other substep")
PowerRule = Rule("PowerRule", "base exp")
AddRule = Rule("AddRule", "substeps")
URule = Rule("URule", "u_var u_func constant substep")
PartsRule = Rule("PartsRule", "u dv v_step second_step")
CyclicPartsRule = Rule("CyclicPartsRule", "parts_rules coefficient")
TrigRule = Rule("TrigRule", "func arg")
ExpRule = Rule("ExpRule", "base exp")
ReciprocalRule = Rule("ReciprocalRule", "func")
ArctanRule = Rule("ArctanRule")
ArcsinRule = Rule("ArcsinRule")
InverseHyperbolicRule = Rule("InverseHyperbolicRule", "func")
AlternativeRule = Rule("AlternativeRule", "alternatives")
DontKnowRule = Rule("DontKnowRule")
DerivativeRule = Rule("DerivativeRule")
RewriteRule = Rule("RewriteRule", "rewritten substep")
PiecewiseRule = Rule("PiecewiseRule", "subfunctions")
HeavisideRule = Rule("HeavisideRule", "func")
TrigSubstitutionRule = Rule("TrigSubstitutionRule", "theta func rewritten substep")
IntegralInfo = namedtuple('IntegralInfo', 'integrand symbol')
evaluators = {}
def evaluates(rule):
def _evaluates(func):
func.rule = rule
evaluators[rule] = func
return func
return _evaluates
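# Illustrative registration sketch (comment only; the real evaluators appear
# further down in this module). A new rule type would be wired up roughly as:
#
#     @evaluates(MyRule)
#     def eval_myrule(field1, field2, integrand, symbol):
#         return ...  # antiderivative built from the rule's fields
#
# The `evaluators` dict is then used to dispatch on the rule's type when the
# recorded steps are evaluated.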
def contains_dont_know(rule):
if isinstance(rule, DontKnowRule):
return True
else:
for val in rule:
if isinstance(val, tuple):
if contains_dont_know(val):
return True
elif isinstance(val, list):
if any(contains_dont_know(i) for i in val):
return True
return False
def manual_diff(f, symbol):
"""Derivative of f in form expected by find_substitutions
SymPy's derivatives for some trig functions (like cot) aren't in a form
that works well with finding substitutions; this replaces the
derivatives for those particular forms with something that works better.
"""
if f.args:
arg = f.args[0]
if isinstance(f, sympy.tan):
return arg.diff(symbol) * sympy.sec(arg)**2
elif isinstance(f, sympy.cot):
return -arg.diff(symbol) * sympy.csc(arg)**2
elif isinstance(f, sympy.sec):
return arg.diff(symbol) * sympy.sec(arg) * sympy.tan(arg)
elif isinstance(f, sympy.csc):
return -arg.diff(symbol) * sympy.csc(arg) * sympy.cot(arg)
elif isinstance(f, sympy.Add):
return sum([manual_diff(arg, symbol) for arg in f.args])
elif isinstance(f, sympy.Mul):
if len(f.args) == 2 and isinstance(f.args[0], sympy.Number):
return f.args[0] * manual_diff(f.args[1], symbol)
return f.diff(symbol)
# Method based on that on SIN, described in "Symbolic Integration: The
# Stormy Decade"
def find_substitutions(integrand, symbol, u_var):
results = []
def test_subterm(u, u_diff):
substituted = integrand / u_diff
if symbol not in substituted.free_symbols:
# replaced everything already
return False
substituted = substituted.subs(u, u_var).cancel()
if symbol not in substituted.free_symbols:
return substituted.as_independent(u_var, as_Add=False)
return False
def possible_subterms(term):
if isinstance(term, (TrigonometricFunction,
sympy.asin, sympy.acos, sympy.atan,
sympy.exp, sympy.log, sympy.Heaviside)):
return [term.args[0]]
elif isinstance(term, sympy.Mul):
r = []
for u in term.args:
r.append(u)
r.extend(possible_subterms(u))
return r
elif isinstance(term, sympy.Pow):
if term.args[1].is_constant(symbol):
return [term.args[0]]
elif term.args[0].is_constant(symbol):
return [term.args[1]]
elif isinstance(term, sympy.Add):
r = []
for arg in term.args:
r.append(arg)
r.extend(possible_subterms(arg))
return r
return []
for u in possible_subterms(integrand):
if u == symbol:
continue
u_diff = manual_diff(u, symbol)
new_integrand = test_subterm(u, u_diff)
if new_integrand is not False:
constant, new_integrand = new_integrand
substitution = (u, constant, new_integrand)
if substitution not in results:
results.append(substitution)
return results
def rewriter(condition, rewrite):
"""Strategy that rewrites an integrand."""
def _rewriter(integral):
integrand, symbol = integral
if condition(*integral):
rewritten = rewrite(*integral)
if rewritten != integrand:
substep = integral_steps(rewritten, symbol)
if not isinstance(substep, DontKnowRule):
return RewriteRule(
rewritten,
substep,
integrand, symbol)
return _rewriter
def proxy_rewriter(condition, rewrite):
"""Strategy that rewrites an integrand based on some other criteria."""
def _proxy_rewriter(criteria):
criteria, integral = criteria
integrand, symbol = integral
args = criteria + list(integral)
if condition(*args):
rewritten = rewrite(*args)
if rewritten != integrand:
return RewriteRule(
rewritten,
integral_steps(rewritten, symbol),
integrand, symbol)
return _proxy_rewriter
def multiplexer(conditions):
"""Apply the rule that matches the condition, else None"""
def multiplexer_rl(expr):
for key, rule in conditions.items():
if key(expr):
return rule(expr)
return multiplexer_rl
def alternatives(*rules):
"""Strategy that makes an AlternativeRule out of multiple possible results."""
def _alternatives(integral):
alts = []
for rule in rules:
result = rule(integral)
if (result and not isinstance(result, DontKnowRule) and
result != integral and result not in alts):
alts.append(result)
if len(alts) == 1:
return alts[0]
elif alts:
doable = [rule for rule in alts if not contains_dont_know(rule)]
if doable:
return AlternativeRule(doable, *integral)
else:
return AlternativeRule(alts, *integral)
return _alternatives
def constant_rule(integral):
integrand, symbol = integral
return ConstantRule(integral.integrand, *integral)
def power_rule(integral):
integrand, symbol = integral
base, exp = integrand.as_base_exp()
if symbol not in exp.free_symbols and isinstance(base, sympy.Symbol):
if sympy.simplify(exp + 1) == 0:
return ReciprocalRule(base, integrand, symbol)
return PowerRule(base, exp, integrand, symbol)
elif symbol not in base.free_symbols and isinstance(exp, sympy.Symbol):
rule = ExpRule(base, exp, integrand, symbol)
if sympy.ask(~sympy.Q.zero(sympy.log(base))):
return rule
elif sympy.ask(sympy.Q.zero(sympy.log(base))):
return ConstantRule(1, 1, symbol)
return PiecewiseRule([
(ConstantRule(1, 1, symbol), sympy.Eq(sympy.log(base), 0)),
(rule, True)
], integrand, symbol)
def exp_rule(integral):
integrand, symbol = integral
if isinstance(integrand.args[0], sympy.Symbol):
return ExpRule(sympy.E, integrand.args[0], integrand, symbol)
def inverse_trig_rule(integral):
integrand, symbol = integral
base, exp = integrand.as_base_exp()
a = sympy.Wild('a', exclude=[symbol])
b = sympy.Wild('b', exclude=[symbol])
match = base.match(a + b*symbol**2)
if not match:
return
def negative(x):
return sympy.ask(sympy.Q.negative(x)) or x.is_negative or x.could_extract_minus_sign()
def ArcsinhRule(integrand, symbol):
return InverseHyperbolicRule(sympy.asinh, integrand, symbol)
def ArccoshRule(integrand, symbol):
return InverseHyperbolicRule(sympy.acosh, integrand, symbol)
def make_inverse_trig(RuleClass, base_exp, a, sign_a, b, sign_b):
u_var = sympy.Dummy("u")
current_base = base
current_symbol = symbol
constant = u_func = u_constant = substep = None
factored = integrand
if a != 1:
constant = a**base_exp
current_base = sign_a + sign_b * (b/a) * current_symbol**2
factored = current_base ** base_exp
if (b/a) != 1:
u_func = sympy.sqrt(b/a) * symbol
u_constant = sympy.sqrt(a/b)
current_symbol = u_var
current_base = sign_a + sign_b * current_symbol**2
substep = RuleClass(current_base ** base_exp, current_symbol)
if u_func is not None:
if u_constant != 1:
substep = ConstantTimesRule(
u_constant, current_base ** base_exp, substep,
u_constant * current_base ** base_exp, symbol)
substep = URule(u_var, u_func, u_constant, substep, factored, symbol)
if constant is not None:
substep = ConstantTimesRule(constant, factored, substep, integrand, symbol)
return substep
a, b = match[a], match[b]
# list of (rule, base_exp, a, sign_a, b, sign_b, condition)
possibilities = []
if sympy.simplify(exp + 1) == 0 and not (negative(a) or negative(b)):
possibilities.append((ArctanRule, exp, a, 1, b, 1, sympy.And(a > 0, b > 0)))
elif sympy.simplify(2*exp + 1) == 0:
possibilities.append((ArcsinRule, exp, a, 1, -b, -1, sympy.And(a > 0, b < 0)))
possibilities.append((ArcsinhRule, exp, a, 1, b, 1, sympy.And(a > 0, b > 0)))
possibilities.append((ArccoshRule, exp, -a, -1, b, 1, sympy.And(a < 0, b > 0)))
possibilities = [p for p in possibilities if p[-1] is not sympy.false]
if a.is_number and b.is_number:
possibility = [p for p in possibilities if p[-1] is sympy.true]
if len(possibility) == 1:
return make_inverse_trig(*possibility[0][:-1])
elif possibilities:
return PiecewiseRule(
[(make_inverse_trig(*p[:-1]), p[-1]) for p in possibilities],
integrand, symbol)
def add_rule(integral):
integrand, symbol = integral
return AddRule(
[integral_steps(g, symbol)
for g in integrand.as_ordered_terms()],
integrand, symbol)
def mul_rule(integral):
integrand, symbol = integral
args = integrand.args
# Constant times function case
coeff, f = integrand.as_independent(symbol)
if coeff != 1:
return ConstantTimesRule(
coeff, f,
integral_steps(f, symbol),
integrand, symbol)
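# Informal example: for 3*cos(x), as_independent(x) splits off the constant, so
# mul_rule returns ConstantTimesRule(3, cos(x), <steps for cos(x)>, 3*cos(x), x).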
def _parts_rule(integrand, symbol):
# LIATE rule:
# log, inverse trig, algebraic (polynomial), trigonometric, exponential
def pull_out_polys(integrand):
integrand = integrand.together()
polys = [arg for arg in integrand.args if arg.is_polynomial(symbol)]
if polys:
u = sympy.Mul(*polys)
dv = integrand / u
return u, dv
def pull_out_u(*functions):
def pull_out_u_rl(integrand):
if any([integrand.has(f) for f in functions]):
args = [arg for arg in integrand.args
if any(isinstance(arg, cls) for cls in functions)]
if args:
u = reduce(lambda a,b: a*b, args)
dv = integrand / u
return u, dv
return pull_out_u_rl
liate_rules = [pull_out_u(sympy.log), pull_out_u(sympy.atan, sympy.asin, sympy.acos),
pull_out_polys, pull_out_u(sympy.sin, sympy.cos),
pull_out_u(sympy.exp)]
dummy = sympy.Dummy("temporary")
# we can integrate log(x) and atan(x) by setting dv = 1
if isinstance(integrand, (sympy.log, sympy.atan, sympy.asin, sympy.acos)):
integrand = dummy * integrand
for index, rule in enumerate(liate_rules):
result = rule(integrand)
if result:
u, dv = result
# Don't pick u to be a constant if possible
if symbol not in u.free_symbols and not u.has(dummy):
return
u = u.subs(dummy, 1)
dv = dv.subs(dummy, 1)
for rule in liate_rules[index + 1:]:
r = rule(integrand)
# make sure dv is amenable to integration
if r and r[0].subs(dummy, 1) == dv:
du = u.diff(symbol)
v_step = integral_steps(dv, symbol)
v = _manualintegrate(v_step)
return u, dv, v, du, v_step
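# Worked LIATE example (informal): for x*exp(x), pull_out_polys chooses u = x
# and dv = exp(x); then du = 1 and v = exp(x), leading to the familiar
# x*exp(x) - exp(x) once the parts formula u*v - integral(v*du) is applied.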
def parts_rule(integral):
integrand, symbol = integral
constant, integrand = integrand.as_coeff_Mul()
result = _parts_rule(integrand, symbol)
steps = []
if result:
u, dv, v, du, v_step = result
steps.append(result)
if isinstance(v, sympy.Integral):
return
while True:
if symbol not in (integrand / (v * du)).cancel().free_symbols:
coefficient = ((v * du) / integrand).cancel()
rule = CyclicPartsRule(
[PartsRule(u, dv, v_step, None, None, None)
for (u, dv, v, du, v_step) in steps],
(-1) ** len(steps) * coefficient,
integrand, symbol
)
if constant != 1:
rule = ConstantTimesRule(constant, integrand, rule,
constant * integrand, symbol)
return rule
result = _parts_rule(v * du, symbol)
if result:
u, dv, v, du, v_step = result
steps.append(result)
else:
break
def make_second_step(steps, integrand):
if steps:
u, dv, v, du, v_step = steps[0]
return PartsRule(u, dv, v_step,
make_second_step(steps[1:], v * du),
integrand, symbol)
else:
return integral_steps(integrand, symbol)
if steps:
u, dv, v, du, v_step = steps[0]
rule = PartsRule(u, dv, v_step,
make_second_step(steps[1:], v * du),
integrand, symbol)
if constant != 1:
rule = ConstantTimesRule(constant, integrand, rule,
constant * integrand, symbol)
return rule
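# Informal note: the while-loop above covers cyclic integration by parts, e.g.
# exp(x)*sin(x), where repeated parts reproduces a multiple of the original
# integrand; CyclicPartsRule records the accumulated steps plus that coefficient.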
def trig_rule(integral):
integrand, symbol = integral
if isinstance(integrand, sympy.sin) or isinstance(integrand, sympy.cos):
arg = integrand.args[0]
if not isinstance(arg, sympy.Symbol):
return # perhaps a substitution can deal with it
if isinstance(integrand, sympy.sin):
func = 'sin'
else:
func = 'cos'
return TrigRule(func, arg, integrand, symbol)
if integrand == sympy.sec(symbol)**2:
return TrigRule('sec**2', symbol, integrand, symbol)
elif integrand == sympy.csc(symbol)**2:
return TrigRule('csc**2', symbol, integrand, symbol)
if isinstance(integrand, sympy.tan):
rewritten = sympy.sin(*integrand.args) / sympy.cos(*integrand.args)
elif isinstance(integrand, sympy.cot):
rewritten = sympy.cos(*integrand.args) / sympy.sin(*integrand.args)
elif isinstance(integrand, sympy.sec):
arg = integrand.args[0]
rewritten = ((sympy.sec(arg)**2 + sympy.tan(arg) * sympy.sec(arg)) /
(sympy.sec(arg) + sympy.tan(arg)))
elif isinstance(integrand, sympy.csc):
arg = integrand.args[0]
rewritten = ((sympy.csc(arg)**2 + sympy.cot(arg) * sympy.csc(arg)) /
(sympy.csc(arg) + sympy.cot(arg)))
else:
return
return RewriteRule(
rewritten,
integral_steps(rewritten, symbol),
integrand, symbol
)
def trig_product_rule(integral):
integrand, symbol = integral
sectan = sympy.sec(symbol) * sympy.tan(symbol)
q = integrand / sectan
if symbol not in q.free_symbols:
rule = TrigRule('sec*tan', symbol, sectan, symbol)
if q != 1:
rule = ConstantTimesRule(q, sectan, rule, integrand, symbol)
return rule
csccot = -sympy.csc(symbol) * sympy.cot(symbol)
q = integrand / csccot
if symbol not in q.free_symbols:
rule = TrigRule('csc*cot', symbol, csccot, symbol)
if q != 1:
rule = ConstantTimesRule(q, csccot, rule, integrand, symbol)
return rule
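# Informal example: 2*sec(x)*tan(x) divides evenly by sec(x)*tan(x), so this
# returns ConstantTimesRule(2, sec(x)*tan(x), TrigRule('sec*tan', ...), ...).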
def heaviside_rule(integral):
integrand, symbol = integral
if isinstance(integrand.args[0], sympy.Symbol):
return HeavisideRule(integrand.args[0], integrand, symbol)
# else perhaps substitution can handle this
@sympy.cacheit
def make_wilds(symbol):
a = sympy.Wild('a', exclude=[symbol])
b = sympy.Wild('b', exclude=[symbol])
m = sympy.Wild('m', exclude=[symbol], properties=[lambda n: isinstance(n, sympy.Integer)])
n = sympy.Wild('n', exclude=[symbol], properties=[lambda n: isinstance(n, sympy.Integer)])
return a, b, m, n
@sympy.cacheit
def sincos_pattern(symbol):
a, b, m, n = make_wilds(symbol)
pattern = sympy.sin(a*symbol)**m * sympy.cos(b*symbol)**n
return pattern, a, b, m, n
@sympy.cacheit
def tansec_pattern(symbol):
a, b, m, n = make_wilds(symbol)
pattern = sympy.tan(a*symbol)**m * sympy.sec(b*symbol)**n
return pattern, a, b, m, n
@sympy.cacheit
def cotcsc_pattern(symbol):
a, b, m, n = make_wilds(symbol)
pattern = sympy.cot(a*symbol)**m * sympy.csc(b*symbol)**n
return pattern, a, b, m, n
def uncurry(func):
def uncurry_rl(args):
return func(*args)
return uncurry_rl
def trig_rewriter(rewrite):
def trig_rewriter_rl(args):
a, b, m, n, integrand, symbol = args
rewritten = rewrite(a, b, m, n, integrand, symbol)
if rewritten != integrand:
return RewriteRule(
rewritten,
integral_steps(rewritten, symbol),
integrand, symbol)
return trig_rewriter_rl
sincos_botheven_condition = uncurry(
lambda a, b, m, n, i, s: m.is_even and n.is_even and
m.is_nonnegative and n.is_nonnegative)
sincos_botheven = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (((1 - sympy.cos(2*a*symbol)) / 2) ** (m / 2)) *
(((1 + sympy.cos(2*b*symbol)) / 2) ** (n / 2)) ))
sincos_sinodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd and m >= 3)
sincos_sinodd = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (1 - sympy.cos(a*symbol)**2)**((m - 1) / 2) *
sympy.sin(a*symbol) *
sympy.cos(b*symbol) ** n))
sincos_cosodd_condition = uncurry(lambda a, b, m, n, i, s: n.is_odd and n >= 3)
sincos_cosodd = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (1 - sympy.sin(b*symbol)**2)**((n - 1) / 2) *
sympy.cos(b*symbol) *
sympy.sin(a*symbol) ** m))
tansec_seceven_condition = uncurry(lambda a, b, m, n, i, s: n.is_even and n >= 4)
tansec_seceven = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (1 + sympy.tan(b*symbol)**2) ** (n/2 - 1) *
sympy.sec(b*symbol)**2 *
sympy.tan(a*symbol) ** m ))
tansec_tanodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd)
tansec_tanodd = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (sympy.sec(a*symbol)**2 - 1) ** ((m - 1) / 2) *
sympy.tan(a*symbol) *
sympy.sec(b*symbol) ** n ))
tan_tansquared_condition = uncurry(lambda a, b, m, n, i, s: m == 2 and n == 0)
tan_tansquared = trig_rewriter(
lambda a, b, m, n, i, symbol: ( sympy.sec(a*symbol)**2 - 1))
cotcsc_csceven_condition = uncurry(lambda a, b, m, n, i, s: n.is_even and n >= 4)
cotcsc_csceven = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (1 + sympy.cot(b*symbol)**2) ** (n/2 - 1) *
sympy.csc(b*symbol)**2 *
sympy.cot(a*symbol) ** m ))
cotcsc_cotodd_condition = uncurry(lambda a, b, m, n, i, s: m.is_odd)
cotcsc_cotodd = trig_rewriter(
lambda a, b, m, n, i, symbol: ( (sympy.csc(a*symbol)**2 - 1) ** ((m - 1) / 2) *
sympy.cot(a*symbol) *
sympy.csc(b*symbol) ** n ))
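# Informal example of the rewriters above: sin(x)**3 * cos(x)**2 hits the
# "sin odd" case and is rewritten to (1 - cos(x)**2) * sin(x) * cos(x)**2,
# which a subsequent u-substitution (u = cos(x)) can finish.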
def trig_sincos_rule(integral):
integrand, symbol = integral
if any(integrand.has(f) for f in (sympy.sin, sympy.cos)):
pattern, a, b, m, n = sincos_pattern(symbol)
match = integrand.match(pattern)
if match:
a, b, m, n = match.get(a, 0), match.get(b, 0), match.get(m, 0), match.get(n, 0)
return multiplexer({
sincos_botheven_condition: sincos_botheven,
sincos_sinodd_condition: sincos_sinodd,
sincos_cosodd_condition: sincos_cosodd
})((a, b, m, n, integrand, symbol))
def trig_tansec_rule(integral):
integrand, symbol = integral
integrand = integrand.subs({
1 / sympy.cos(symbol): sympy.sec(symbol)
})
if any(integrand.has(f) for f in (sympy.tan, sympy.sec)):
pattern, a, b, m, n = tansec_pattern(symbol)
match = integrand.match(pattern)
if match:
            a, b, m, n = match.get(a, 0), match.get(b, 0), match.get(m, 0), match.get(n, 0)
return multiplexer({
tansec_tanodd_condition: tansec_tanodd,
tansec_seceven_condition: tansec_seceven,
tan_tansquared_condition: tan_tansquared
})((a, b, m, n, integrand, symbol))
def trig_cotcsc_rule(integral):
integrand, symbol = integral
integrand = integrand.subs({
1 / sympy.sin(symbol): sympy.csc(symbol),
1 / sympy.tan(symbol): sympy.cot(symbol),
sympy.cos(symbol) / sympy.tan(symbol): sympy.cot(symbol)
})
if any(integrand.has(f) for f in (sympy.cot, sympy.csc)):
pattern, a, b, m, n = cotcsc_pattern(symbol)
match = integrand.match(pattern)
if match:
            a, b, m, n = match.get(a, 0), match.get(b, 0), match.get(m, 0), match.get(n, 0)
return multiplexer({
cotcsc_cotodd_condition: cotcsc_cotodd,
cotcsc_csceven_condition: cotcsc_csceven
})((a, b, m, n, integrand, symbol))
def trig_powers_products_rule(integral):
return do_one(null_safe(trig_sincos_rule),
null_safe(trig_tansec_rule),
null_safe(trig_cotcsc_rule))(integral)
def trig_substitution_rule(integral):
integrand, symbol = integral
a = sympy.Wild('a', exclude=[0, symbol])
b = sympy.Wild('b', exclude=[0, symbol])
theta = sympy.Dummy("theta")
matches = integrand.find(a + b*symbol**2)
if matches:
for expr in matches:
match = expr.match(a + b*symbol**2)
a = match[a]
b = match[b]
a_positive = ((a.is_number and a > 0) or a.is_positive)
b_positive = ((b.is_number and b > 0) or b.is_positive)
x_func = None
if a_positive and b_positive:
# a**2 + b*x**2
x_func = (sympy.sqrt(a)/sympy.sqrt(b)) * sympy.tan(theta)
elif a_positive and not b_positive:
# a**2 - b*x**2
x_func = (sympy.sqrt(a)/sympy.sqrt(-b)) * sympy.sin(theta)
elif not a_positive and b_positive:
# b*x**2 - a**2
x_func = (sympy.sqrt(-a)/sympy.sqrt(b)) * sympy.sec(theta)
if x_func:
replaced = integrand.subs(symbol, x_func).trigsimp()
if not replaced.has(symbol):
replaced *= manual_diff(x_func, theta)
replaced = replaced.trigsimp()
secants = replaced.find(1/sympy.cos(theta))
if secants:
replaced = replaced.xreplace({
1/sympy.cos(theta): sympy.sec(theta)
})
substep = integral_steps(replaced, theta)
if not contains_dont_know(substep):
return TrigSubstitutionRule(
theta, x_func, replaced, substep, integrand, symbol)
def substitution_rule(integral):
integrand, symbol = integral
u_var = sympy.Dummy("u")
substitutions = find_substitutions(integrand, symbol, u_var)
if substitutions:
ways = []
for u_func, c, substituted in substitutions:
subrule = integral_steps(substituted, u_var)
if contains_dont_know(subrule):
continue
if sympy.simplify(c - 1) != 0:
_, denom = c.as_numer_denom()
subrule = ConstantTimesRule(c, substituted, subrule, substituted, u_var)
if denom.free_symbols:
piecewise = []
could_be_zero = []
if isinstance(denom, sympy.Mul):
could_be_zero = denom.args
else:
could_be_zero.append(denom)
for expr in could_be_zero:
if not sympy.ask(~sympy.Q.zero(expr)):
substep = integral_steps(integrand.subs(expr, 0), symbol)
if substep:
piecewise.append((
substep,
sympy.Eq(expr, 0)
))
piecewise.append((subrule, True))
subrule = PiecewiseRule(piecewise, substituted, symbol)
ways.append(URule(u_var, u_func, c,
subrule,
integrand, symbol))
if len(ways) > 1:
return AlternativeRule(ways, integrand, symbol)
elif ways:
return ways[0]
elif integrand.has(sympy.exp):
u_func = sympy.exp(symbol)
c = 1
substituted = integrand / u_func.diff(symbol)
substituted = substituted.subs(u_func, u_var)
if symbol not in substituted.free_symbols:
return URule(u_var, u_func, c,
integral_steps(substituted, u_var),
integrand, symbol)
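# Informal example for the substitution search above: for x*exp(x**2) a natural
# choice is u = x**2; the leftover constant 1/2 is then wrapped in a
# ConstantTimesRule inside the resulting URule.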
partial_fractions_rule = rewriter(
lambda integrand, symbol: integrand.is_rational_function(),
lambda integrand, symbol: integrand.apart(symbol))
distribute_expand_rule = rewriter(
lambda integrand, symbol: (
all(arg.is_Pow or arg.is_polynomial(symbol) for arg in integrand.args)
or isinstance(integrand, sympy.Pow)
or isinstance(integrand, sympy.Mul)),
lambda integrand, symbol: integrand.expand())
def derivative_rule(integral):
variables = integral[0].args[1:]
if variables[-1] == integral.symbol:
return DerivativeRule(*integral)
else:
return ConstantRule(integral.integrand, *integral)
def rewrites_rule(integral):
integrand, symbol = integral
if integrand.match(1/sympy.cos(symbol)):
rewritten = integrand.subs(1/sympy.cos(symbol), sympy.sec(symbol))
return RewriteRule(rewritten, integral_steps(rewritten, symbol), integrand, symbol)
def fallback_rule(integral):
return DontKnowRule(*integral)
# Cache is used to break cyclic integrals
_integral_cache = {}
def integral_steps(integrand, symbol, **options):
"""Returns the steps needed to compute an integral.
This function attempts to mirror what a student would do by hand as
closely as possible.
SymPy Gamma uses this to provide a step-by-step explanation of an
integral. The code it uses to format the results of this function can be
found at
https://github.com/sympy/sympy_gamma/blob/master/app/logic/intsteps.py.
Examples
========
>>> from sympy import exp, sin, cos
>>> from sympy.integrals.manualintegrate import integral_steps
>>> from sympy.abc import x
>>> print(repr(integral_steps(exp(x) / (1 + exp(2 * x)), x))) \
# doctest: +NORMALIZE_WHITESPACE
URule(u_var=_u, u_func=exp(x), constant=1,
substep=ArctanRule(context=1/(_u**2 + 1), symbol=_u),
context=exp(x)/(exp(2*x) + 1), symbol=x)
>>> print(repr(integral_steps(sin(x), x))) \
# doctest: +NORMALIZE_WHITESPACE
TrigRule(func='sin', arg=x, context=sin(x), symbol=x)
>>> print(repr(integral_steps((x**2 + 3)**2 , x))) \
# doctest: +NORMALIZE_WHITESPACE
RewriteRule(rewritten=x**4 + 6*x**2 + 9,
substep=AddRule(substeps=[PowerRule(base=x, exp=4, context=x**4, symbol=x),
ConstantTimesRule(constant=6, other=x**2,
substep=PowerRule(base=x, exp=2, context=x**2, symbol=x),
context=6*x**2, symbol=x),
ConstantRule(constant=9, context=9, symbol=x)],
context=x**4 + 6*x**2 + 9, symbol=x), context=(x**2 + 3)**2, symbol=x)
Returns
=======
rule : namedtuple
The first step; most rules have substeps that must also be
considered. These substeps can be evaluated using ``manualintegrate``
to obtain a result.
"""
cachekey = (integrand, symbol)
if cachekey in _integral_cache:
if _integral_cache[cachekey] is None:
# cyclic integral! null_safe will eliminate that path
return None
else:
return _integral_cache[cachekey]
else:
_integral_cache[cachekey] = None
integral = IntegralInfo(integrand, symbol)
def key(integral):
integrand = integral.integrand
if isinstance(integrand, TrigonometricFunction):
return TrigonometricFunction
elif isinstance(integrand, sympy.Derivative):
return sympy.Derivative
elif symbol not in integrand.free_symbols:
return sympy.Number
else:
for cls in (sympy.Pow, sympy.Symbol, sympy.exp, sympy.log,
sympy.Add, sympy.Mul, sympy.atan, sympy.asin, sympy.acos, sympy.Heaviside):
if isinstance(integrand, cls):
return cls
def integral_is_subclass(*klasses):
def _integral_is_subclass(integral):
k = key(integral)
return k and issubclass(k, klasses)
return _integral_is_subclass
result = do_one(
null_safe(switch(key, {
sympy.Pow: do_one(null_safe(power_rule), null_safe(inverse_trig_rule)),
sympy.Symbol: power_rule,
sympy.exp: exp_rule,
sympy.Add: add_rule,
sympy.Mul: do_one(null_safe(mul_rule), null_safe(trig_product_rule)),
sympy.Derivative: derivative_rule,
TrigonometricFunction: trig_rule,
sympy.Heaviside: heaviside_rule,
sympy.Number: constant_rule
})),
do_one(
null_safe(trig_rule),
null_safe(alternatives(
rewrites_rule,
substitution_rule,
condition(
integral_is_subclass(sympy.Mul, sympy.Pow),
partial_fractions_rule),
condition(
integral_is_subclass(sympy.Mul, sympy.log, sympy.atan, sympy.asin, sympy.acos),
parts_rule),
condition(
integral_is_subclass(sympy.Mul, sympy.Pow),
distribute_expand_rule),
trig_powers_products_rule
)),
null_safe(trig_substitution_rule)
),
fallback_rule)(integral)
del _integral_cache[cachekey]
return result
@evaluates(ConstantRule)
def eval_constant(constant, integrand, symbol):
return constant * symbol
@evaluates(ConstantTimesRule)
def eval_constanttimes(constant, other, substep, integrand, symbol):
return constant * _manualintegrate(substep)
@evaluates(PowerRule)
def eval_power(base, exp, integrand, symbol):
return (base ** (exp + 1)) / (exp + 1)
@evaluates(ExpRule)
def eval_exp(base, exp, integrand, symbol):
return integrand / sympy.ln(base)
@evaluates(AddRule)
def eval_add(substeps, integrand, symbol):
return sum(map(_manualintegrate, substeps))
@evaluates(URule)
def eval_u(u_var, u_func, constant, substep, integrand, symbol):
result = _manualintegrate(substep)
return result.subs(u_var, u_func)
@evaluates(PartsRule)
def eval_parts(u, dv, v_step, second_step, integrand, symbol):
v = _manualintegrate(v_step)
return u * v - _manualintegrate(second_step)
@evaluates(CyclicPartsRule)
def eval_cyclicparts(parts_rules, coefficient, integrand, symbol):
coefficient = 1 - coefficient
result = []
sign = 1
for rule in parts_rules:
result.append(sign * rule.u * _manualintegrate(rule.v_step))
sign *= -1
return sympy.Add(*result) / coefficient
@evaluates(TrigRule)
def eval_trig(func, arg, integrand, symbol):
if func == 'sin':
return -sympy.cos(arg)
elif func == 'cos':
return sympy.sin(arg)
elif func == 'sec*tan':
return sympy.sec(arg)
elif func == 'csc*cot':
return sympy.csc(arg)
elif func == 'sec**2':
return sympy.tan(arg)
elif func == 'csc**2':
return -sympy.cot(arg)
@evaluates(ReciprocalRule)
def eval_reciprocal(func, integrand, symbol):
return sympy.ln(func)
@evaluates(ArctanRule)
def eval_arctan(integrand, symbol):
return sympy.atan(symbol)
@evaluates(ArcsinRule)
def eval_arcsin(integrand, symbol):
return sympy.asin(symbol)
@evaluates(InverseHyperbolicRule)
def eval_inversehyperbolic(func, integrand, symbol):
return func(symbol)
@evaluates(AlternativeRule)
def eval_alternative(alternatives, integrand, symbol):
return _manualintegrate(alternatives[0])
@evaluates(RewriteRule)
def eval_rewrite(rewritten, substep, integrand, symbol):
return _manualintegrate(substep)
@evaluates(PiecewiseRule)
def eval_piecewise(substeps, integrand, symbol):
return sympy.Piecewise(*[(_manualintegrate(substep), cond)
for substep, cond in substeps])
@evaluates(TrigSubstitutionRule)
def eval_trigsubstitution(theta, func, rewritten, substep, integrand, symbol):
func = func.subs(sympy.sec(theta), 1/sympy.cos(theta))
trig_function = list(func.find(TrigonometricFunction))
assert len(trig_function) == 1
trig_function = trig_function[0]
relation = sympy.solve(symbol - func, trig_function)
assert len(relation) == 1
numer, denom = sympy.fraction(relation[0])
if isinstance(trig_function, sympy.sin):
opposite = numer
hypotenuse = denom
adjacent = sympy.sqrt(denom**2 - numer**2)
inverse = sympy.asin(relation[0])
elif isinstance(trig_function, sympy.cos):
adjacent = numer
hypotenuse = denom
opposite = sympy.sqrt(denom**2 - numer**2)
inverse = sympy.acos(relation[0])
elif isinstance(trig_function, sympy.tan):
opposite = numer
adjacent = denom
hypotenuse = sympy.sqrt(denom**2 + numer**2)
inverse = sympy.atan(relation[0])
substitution = [
(sympy.sin(theta), opposite/hypotenuse),
(sympy.cos(theta), adjacent/hypotenuse),
(sympy.tan(theta), opposite/adjacent),
(theta, inverse)
]
return _manualintegrate(substep).subs(substitution).trigsimp()
@evaluates(DerivativeRule)
def eval_derivativerule(integrand, symbol):
# isinstance(integrand, Derivative) should be True
if len(integrand.args) == 2:
return integrand.args[0]
else:
return sympy.Derivative(integrand.args[0], *integrand.args[1:-1])
@evaluates(HeavisideRule)
def eval_heaviside(arg, integrand, symbol):
# this result can also be represented as sympy.Max(0, arg)
return arg*sympy.Heaviside(arg)
@evaluates(DontKnowRule)
def eval_dontknowrule(integrand, symbol):
return sympy.Integral(integrand, symbol)
def _manualintegrate(rule):
evaluator = evaluators.get(rule.__class__)
if not evaluator:
raise ValueError("Cannot evaluate rule %s" % repr(rule))
return evaluator(*rule)
def manualintegrate(f, var):
"""manualintegrate(f, var)
Compute indefinite integral of a single variable using an algorithm that
resembles what a student would do by hand.
Unlike ``integrate``, var can only be a single symbol.
Examples
========
>>> from sympy import sin, cos, tan, exp, log, integrate
>>> from sympy.integrals.manualintegrate import manualintegrate
>>> from sympy.abc import x
>>> manualintegrate(1 / x, x)
log(x)
>>> integrate(1/x)
log(x)
>>> manualintegrate(log(x), x)
x*log(x) - x
>>> integrate(log(x))
x*log(x) - x
>>> manualintegrate(exp(x) / (1 + exp(2 * x)), x)
atan(exp(x))
>>> integrate(exp(x) / (1 + exp(2 * x)))
RootSum(4*_z**2 + 1, Lambda(_i, _i*log(2*_i + exp(x))))
>>> manualintegrate(cos(x)**4 * sin(x), x)
-cos(x)**5/5
>>> integrate(cos(x)**4 * sin(x), x)
-cos(x)**5/5
>>> manualintegrate(cos(x)**4 * sin(x)**3, x)
cos(x)**7/7 - cos(x)**5/5
>>> integrate(cos(x)**4 * sin(x)**3, x)
cos(x)**7/7 - cos(x)**5/5
>>> manualintegrate(tan(x), x)
-log(cos(x))
>>> integrate(tan(x), x)
-log(sin(x)**2 - 1)/2
See Also
========
sympy.integrals.integrals.integrate
sympy.integrals.integrals.Integral.doit
sympy.integrals.integrals.Integral
"""
return _manualintegrate(integral_steps(f, var))
|
bsd-3-clause
| -7,936,689,559,096,110,000
| 35.01448
| 99
| 0.585561
| false
| 3.618805
| false
| false
| false
|
sipdbg/sipdbg
|
logger.py
|
1
|
1803
|
import sys
import logging
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
def formatter_message(message, use_color = True):
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace(
"$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
GREY, RED, GREEN, YELLOW, BLUE, PURPLE, AZUR, WHITE, BLACK = range(9)
COLORS = {
'DEBUG' : YELLOW,
'INFO' : GREEN,
'WARNING' : RED,
'ERROR' : BLACK,
'CRITICAL' : BLACK
}
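# These values are added to 30 to form ANSI SGR foreground codes (30-37).
# Note that BLACK is 8 here, giving 38, which is the extended-colour introducer
# rather than one of the eight basic colours.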
class ColoredFormatter (logging.Formatter):
def __init__ (self, msg, use_color = True):
logging.Formatter.__init__ (self, msg)
self.use_color = use_color
def format (self, record):
levelname = record.levelname
if self.use_color and levelname in COLORS:
levelname_color = COLOR_SEQ % (30 + COLORS [levelname]) + levelname [:1] + RESET_SEQ
record.levelname = levelname_color
return logging.Formatter.format (self, record)
class ColoredLogger (logging.Logger):
FORMAT = "[%(levelname)s] %(message)s"
COLOR_FORMAT = formatter_message (FORMAT, True)
def __init__ (self, name):
logging.Logger.__init__ (self, name, logging.INFO)
color_formatter = ColoredFormatter (self.COLOR_FORMAT)
console = logging.StreamHandler (sys.stdout)
console.setFormatter (color_formatter)
self.addHandler (console)
return
if '__main__' == __name__:
logging.setLoggerClass (ColoredLogger)
logger = ColoredLogger ("MyTestLogger")
logger.debug ("debugmsg")
logger.info ("infomsg")
logger.warn ("warnmsg")
logger.error ("errormsg")
# http://docs.python.org/2/library/logging.handlers.html#memoryhandler
|
gpl-2.0
| -2,628,304,185,319,130,000
| 31.196429
| 96
| 0.621742
| false
| 3.500971
| false
| false
| false
|
forrestgtran/TeamX
|
handwritingRecognition/performRecognitionAlpha.py
|
1
|
2865
|
# Import the modules
import cv2
from sklearn.externals import joblib
from skimage.feature import hog
import numpy as np
from PIL import Image
# Load the classifier
clf = joblib.load("digits_cls_alpha6.pkl")
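# Note: the pickled classifier is presumably a linear SVM trained on HOG
# features of 28x28 glyph images; the ROI preprocessing below (resize to 28x28,
# dilate, HOG with 14x14 cells) has to match whatever pipeline built that model.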
# Read the input image
im = cv2.imread("adrienne2b.jpg")
im_pil = Image.open("adrienne2b.jpg")
# Convert to grayscale and apply Gaussian filtering
im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
im_gray = cv2.GaussianBlur(im_gray, (5, 5), 0)
# Threshold the image
ret, im_th = cv2.threshold(im_gray, 90, 255, cv2.THRESH_BINARY_INV)
# Find contours in the image
ctrs, hier = cv2.findContours(im_th.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Get rectangles contains each contour
rects = [cv2.boundingRect(ctr) for ctr in ctrs]
# store tuples of size 2 digit/alpha bounding boxes
# the two elements in the tuple represent the vertices of the bounding box on the image
digit_alphabet_bounding_boxes = []
# cropped_images = []
# For each rectangular region, calculate HOG features and predict
# the digit using Linear SVM.
for rect in rects:
# Draw the rectangles
cv2.rectangle(im, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 3)
v1 = (rect[0], rect[1])
v2 = (rect[0] + rect[2], rect[1] + rect[3])
# append bounding box
# digit_alphabet_bounding_boxes.append((v1,v2))
print "v1"
print rect[0]
print rect[1]
print " - - - "
print "v2"
print rect[0] + rect[2]
print rect[1] + rect[3]
print " - - - "
print "rect[0]", rect[0]
print "rect[1]", rect[1]
print "rect[2]", rect[2]
print "rect[3]", rect[3]
box = (rect[0], rect[1], rect[0] + rect[2], rect[1] + rect[3])
digit_alphabet_bounding_boxes.append(box)
# Make the rectangular region around the digit
leng = int(rect[3] * 1.6)
pt1 = int(rect[1] + rect[3] // 2 - leng // 2)
pt2 = int(rect[0] + rect[2] // 2 - leng // 2)
roi = im_th[pt1:pt1+leng, pt2:pt2+leng]
# Resize the image
roi = cv2.resize(roi, (28, 28), interpolation=cv2.INTER_AREA)
roi = cv2.dilate(roi, (3, 3))
# Calculate the HOG features
roi_hog_fd = hog(roi, orientations=9, pixels_per_cell=(14, 14), cells_per_block=(1, 1), visualise=False)
nbr = clf.predict(np.array([roi_hog_fd], 'float64'))
cv2.putText(im, str(nbr[0]), (rect[0], rect[1]),cv2.FONT_HERSHEY_DUPLEX, 2, (0, 255, 255), 3)
print "# # # # IDENTIFIED ITEM", str(nbr[0])
# ^ ^ IDENTIFIED NUMBER = str(int(nbr[0]))
digit_alphabet_bounding_boxes2 = sorted(digit_alphabet_bounding_boxes, key=lambda x: x[0])
i=0
for item in digit_alphabet_bounding_boxes2:
temp_region = im_pil.crop(item)
temp_str = 'jeremy2region' + str(i)
temp_region.save(temp_str, 'jpeg')
i += 1
cv2.imshow("Resulting Image with Rectangular ROIs", im)
cv2.waitKey()
# input: Image
# output: number
|
apache-2.0
| -8,676,434,699,366,484,000
| 27.65
| 108
| 0.647469
| false
| 2.739006
| false
| false
| false
|
8l/luz-cpu
|
luz_asm_sim/lib/simlib/interactive_cli.py
|
1
|
4504
|
# Interactive command-line simulation functions
#
# Luz micro-controller simulator
# Eli Bendersky (C) 2008-2010
#
import sys
from .luzsim import LuzSim
from ..asmlib.disassembler import disassemble
from ..asmlib.asm_instructions import register_alias_of
from ..commonlib.utils import word2bytes
from ..commonlib.portability import printme, get_input
def print_regs(sim, replace_alias=True):
for i in range(32):
if replace_alias:
regname = register_alias_of[i]
else:
regname = "$r%s" % i
printme('%-5s = 0x%08X' % (regname, sim.reg_value(i)))
if i % 4 == 3:
printme('\n')
else:
printme(' ')
printme('\n')
def do_step(sim):
instr_word = sim.memory.read_instruction(sim.pc)
sim.step()
def show_memory(sim, addr):
for linenum in range(4):
printme("0x%08X: " % (addr + linenum * 16,))
for wordnum in range(4):
waddr = addr + linenum * 16 + wordnum * 4
memword = sim.memory.read_mem(waddr, width=4)
bytes = word2bytes(memword)
for b in bytes:
printme("%02X" % b)
printme(' ')
printme('\n')
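# Illustrative output of show_memory (format only, the values are made up):
#   0x00000000: DEADBEEF 00000000 12345678 0000FFFF
#   0x00000010: ...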
help_message = r'''
Supported commands:
s [nsteps] Single step. If 'nsteps' is specified, then 'nsteps'
steps are done.
r Print the contents of all registers
sr Single step and print the contents of all registers
m <addr> Show memory contents at <addr>
rst Restart the simulator
? or help Print this help message
q Quit the simulator
set <param> <value>
Set parameter value (see next section)
Parameters:
alias 1 to show alias names of registers, 0 to show plain
register names.
'''
def print_help():
printme(help_message + '\n')
def interactive_cli_sim(img):
""" An interactive command-line simulation.
img: Executable image
"""
sim = LuzSim(img)
printme('\nLUZ simulator started at 0x%08X\n\n' % sim.pc)
params = {
'alias': True,
}
while True:
try:
# show the current instruction
instr_disasm = disassemble(
word=sim.memory.read_instruction(sim.pc),
replace_alias=params['alias'])
# get a command from the user
line = get_input('[0x%08X] [%s] >> ' % (sim.pc, instr_disasm)).strip()
# skip empty lines
if not line.strip():
continue
cmd, args = parse_cmd(line)
if cmd == 's':
if len(args) >= 1:
nsteps = int(args[0])
else:
nsteps = 1
for i in range(nsteps):
do_step(sim)
elif cmd == 'q':
return
elif cmd == 'rst':
sim.restart()
printme('Restarted\n')
elif cmd == 'r':
print_regs(sim, replace_alias=params['alias'])
elif cmd == 'sr':
do_step(sim)
print_regs(sim, replace_alias=params['alias'])
elif cmd == 'm':
addr = args[0]
show_memory(sim, eval(addr))
elif cmd == 'set':
if len(args) != 2:
printme("Error: invalid command\n")
continue
param, value = args[0], args[1]
if param in params:
params[param] = eval(value)
else:
printme("Error: no such parameter '%s'\n" % param)
elif cmd == '?' or cmd == 'help':
print_help()
else:
printme('Unknown command. To get some help, type ? or help\n')
except Exception:
e = sys.exc_info()[1]
printme('\n!!ERROR!!: %s %s\n' % (type(e), str(e)))
def parse_cmd(line):
""" Parses a command
"""
tokens = [t.strip() for t in line.split()]
return tokens[0], tokens[1:]
|
unlicense
| -4,718,183,565,284,314,000
| 26.687898
| 82
| 0.461146
| false
| 4.162662
| false
| false
| false
|
geonexus/fiware-cloto
|
fiware_cloto/cloto/tests/acceptance_tests/component/tenant_information/features/tenant_information.py
|
1
|
4559
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright 2014 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
#
__author__ = 'arobres'
# -*- coding: utf-8 -*-
from lettuce import step, world, before
from commons.rest_utils import RestUtils
from commons.constants import TENANT_DOC, TENANT_OWNER, TENANT_VERSION, TENANT_WSIZE, TENANT_DEFAULT_DOC
from commons.configuration import HEADERS, TENANT_ID
import commons.utils as Utils
import commons.authentication as Auth
api_utils = RestUtils()
@before.each_feature
def setup_feature(feature):
token_id, world.tenant_id = Auth.get_token()
HEADERS['X-Auth-Token'] = token_id
@before.each_scenario
def setup(scenario):
#Set default headers with correct token before every scenario
world.headers = HEADERS
@step(u'the tenant "([^"]*)"')
def set_tenant_id(step, tenant_id):
world.tenant_id = tenant_id
@step(u'created tenant')
def set_default_tenant(step):
#Set default tenant_id as a global variable
world.tenant_id = TENANT_ID
@step(u'I retrieve the tenant information')
def retrieve_tenant_information(step):
world.req = api_utils.retrieve_information(tenant_id=world.tenant_id, headers=world.headers)
@step(u'I get the following information:')
def check_tenant_information(step):
assert world.req.ok, 'Invalid HTTP status code. Status Code obtained is: {}'.format(world.req.status_code)
response = Utils.assert_json_format(world.req)
for expected_result in step.hashes:
assert response[TENANT_DOC] == TENANT_DEFAULT_DOC, 'Expected {} is: {} \n Obtained {} is: ' \
'{}'.format(TENANT_DOC, TENANT_DEFAULT_DOC,
TENANT_DOC, response[TENANT_DOC])
assert response[TENANT_OWNER] == expected_result[TENANT_OWNER], 'Expected {} is: {} \n Obtained {} is: ' \
'{}'.format(TENANT_OWNER,
expected_result[TENANT_OWNER],
TENANT_OWNER,
response[TENANT_OWNER])
assert TENANT_VERSION in response, 'API Version not found in the response'
assert TENANT_WSIZE in response, 'WindowSize value not found in the API response'
@step(u'I obtain an "([^"]*)" and the "([^"]*)"')
def assert_error_response(step, error_code, fault_element):
Utils.assert_error_code_error(response=world.req, expected_error_code=error_code,
expected_fault_element=fault_element)
@step(u'incorrect "([^"]*)"')
def set_incorrect_token(step, token):
#Set and incorrect header to obtain unauthorized error
world.headers = Utils.create_header(token=token)
@step(u'I update the "([^"]*)"')
def update_window_size(step, window_size):
try:
world.window_size = int(window_size)
except ValueError:
print 'Window Size can not be converted to integer'
world.window_size = window_size
world.req = api_utils.update_window_size(tenant_id=world.tenant_id, window_size=world.window_size,
headers=world.headers)
@step(u'the "([^"]*)" is update in Policy Manager')
def assert_window_size(step, window_size):
assert world.req.ok, str(world.req.status_code) + world.req.content
response = Utils.assert_json_format(world.req)
assert str(response[TENANT_WSIZE]) == window_size
world.req = api_utils.retrieve_information(tenant_id=world.tenant_id, headers=world.headers)
response = Utils.assert_json_format(world.req)
assert str(response[TENANT_WSIZE]) == window_size
|
apache-2.0
| -3,880,594,550,653,137,000
| 33.263158
| 114
| 0.633531
| false
| 3.861864
| false
| false
| false
|
ingolemo/cmd-utils
|
timepick.py
|
1
|
1388
|
#!/usr/bin/env python
"""Usage: timepick [second|minute|hour|day|week]
A filter that picks a single line from stdin based on the current time.
This has an advantage over random selection in that it's cyclical, so
there is no clustering or repetition of selections; it seems 'more
random' to people.
"""
import os
import sys
import datetime
pdict = {
"second": lambda a: (a.days * 24 * 60 * 60) + a.seconds,
"minute": lambda a: (a.days * 24 * 60) + (a.seconds / 60),
"hour": lambda a: (a.days * 24) + (a.seconds / 3600),
"day": lambda a: a.days,
"week": lambda a: a.days / 7,
}
def numSinceEpoch(period):
td = datetime.datetime.now() - datetime.datetime.fromtimestamp(0)
return abs(int(pdict[period](td)))
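# Illustrative shell usage (hypothetical input):
#   ls quotes/ | ./timepick.py day      # same line all day, next line tomorrow
#   printf 'a\nb\nc\n' | ./timepick.py minute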
def main(argv):
if not argv[1:] or "-h" in argv or "--help" in argv:
return __doc__
try:
div = argv[1]
if div.endswith("s"):
div = div[:-1]
except IndexError:
div = object()
if div not in pdict:
return "usage: {0} [{1}]".format(
os.path.basename(argv[0]), "|".join(sorted(pdict.keys()))
)
choices = sys.stdin.readlines()
try:
lineno = numSinceEpoch(div) % len(choices)
except ZeroDivisionError:
pass
else:
choice = choices[lineno].strip()
print(choice)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
gpl-3.0
| -4,550,585,445,860,526,000
| 23.785714
| 71
| 0.59366
| false
| 3.344578
| false
| false
| false
|
mozilla/kuma
|
kuma/search/tests/__init__.py
|
1
|
2536
|
from django.conf import settings
from elasticsearch.exceptions import ConnectionError
from elasticsearch_dsl.connections import connections
from rest_framework.test import APIRequestFactory
from kuma.core.i18n import activate_language_from_request
from kuma.users.tests import UserTestCase
from kuma.wiki.search import WikiDocumentType
from ..models import Index
factory = APIRequestFactory()
class ElasticTestCase(UserTestCase):
"""Base class for Elastic Search tests, providing some conveniences"""
@classmethod
def setUpClass(cls):
super(ElasticTestCase, cls).setUpClass()
if not getattr(settings, 'ES_URLS', None):
cls.skipme = True
return
try:
connections.get_connection().cluster.health()
except ConnectionError:
cls.skipme = True
return
cls._old_es_index_prefix = settings.ES_INDEX_PREFIX
settings.ES_INDEX_PREFIX = 'test-%s' % settings.ES_INDEX_PREFIX
cls._old_es_live_index = settings.ES_LIVE_INDEX
settings.ES_LIVE_INDEX = True
@classmethod
def tearDownClass(cls):
super(ElasticTestCase, cls).tearDownClass()
if not cls.skipme:
# Restore old setting.
settings.ES_INDEX_PREFIX = cls._old_es_index_prefix
settings.ES_LIVE_INDEX = cls._old_es_live_index
def setUp(self):
super(ElasticTestCase, self).setUp()
self.setup_indexes()
def tearDown(self):
super(ElasticTestCase, self).tearDown()
self.teardown_indexes()
def refresh(self, index=None):
index = index or Index.objects.get_current().prefixed_name
# Any time we're doing a refresh, we're making sure that the
# index is ready to be queried. Given that, it's almost
# always the case that we want to run all the generated tasks,
# then refresh.
connections.get_connection().indices.refresh(index=index)
def setup_indexes(self):
"""Clear and repopulate the current index."""
WikiDocumentType.reindex_all()
def teardown_indexes(self):
es = connections.get_connection()
for index in Index.objects.all():
# Ignore indices that do not exist.
es.indices.delete(index.prefixed_name, ignore=[404])
def get_request(self, *args, **kwargs):
request = factory.get(*args, **kwargs)
# setting request.LANGUAGE_CODE correctly
activate_language_from_request(request)
return request
|
mpl-2.0
| -8,934,910,198,099,170,000
| 31.101266
| 74
| 0.6597
| false
| 4.219634
| true
| false
| false
|
xantage/code
|
vilya/libs/text.py
|
1
|
2278
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import urllib
import hashlib
import re  # used by EMAILRE below; may also come in via the mikoto star import
from mikoto.libs.text import *
from mikoto.libs.emoji import *
from vilya.config import EMAIL_SUFFIX
def trunc_utf8(string, num, etc="..."):
"""truncate a utf-8 string, show as num chars.
arg: string, a utf-8 encoding string; num, look like num chars
return: a utf-8 string
"""
try:
gb = string.decode("utf8", "ignore")
except UnicodeEncodeError: # Already decoded
gb = string
gb = gb.encode("gb18030", "ignore")
if num >= len(gb):
return string
if etc:
etc_len = len(etc.decode("utf8", "ignore").encode("gb18030", "ignore"))
trunc_idx = num - etc_len
else:
trunc_idx = num
ret = gb[:trunc_idx].decode("gb18030", "ignore").encode("utf8")
if etc:
ret += etc
return ret
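# Informal example: trunc_utf8("hello world", 5) -> "he..." (the ellipsis itself
# counts towards the requested width, which is measured in gb18030 bytes).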
EMAILRE = re.compile(
r'^[_\.0-9a-zA-Z+-]+@([0-9a-zA-Z]+[0-9a-zA-Z-]*\.)+[a-zA-Z]{2,4}$')
def _validate_email(email):
if not email:
return False
if len(email) >= 6:
return EMAILRE.match(email) is not None
return False
# FIXME: bad smell, useful ?
def email_normalizer(name, email):
if _validate_email(email):
return email
else:
return name + '@' + EMAIL_SUFFIX
def is_image(fname):
return bool(RE_IMAGE_FILENAME.match(fname))
def is_binary(fname):
ext = fname.split('.')
if ext is None:
return False
if len(ext) == 1:
return ext[0] not in SOURCE_FILE
ext = '.' + ext[-1]
if ext in IS_GENERATED:
return False
if ext in IGNORE_FILE_EXTS or ext not in (SOURCE_FILE + NOT_GENERATED):
return True
return False
def gravatar_url(email, size=140):
default = "http://img3.douban.com/icon/user_normal.jpg"
url = "http://douvatar.dapps.douban.com/mirror/" + hashlib.md5(
email.encode('utf8').lower()).hexdigest() + "?"
url += urllib.urlencode({'d': default, 's': str(size), 'r': 'x'})
return url
def remove_unknown_character(text):
if isinstance(text, str):
return text.decode('utf-8', 'ignore').encode('utf-8', 'ignore')
return text
def plural(count, single, plural):
if count <= 1:
return single
else:
return plural
|
bsd-3-clause
| 6,086,639,086,148,238,000
| 24.595506
| 79
| 0.598332
| false
| 3.254286
| false
| false
| false
|
ThomasTheSpaceFox/Desutezeoid
|
dzulib1.py
|
1
|
6392
|
#!/usr/bin/env python
import pygame.event
import pygame.key
import pygame.display
import pygame.image
import pygame.mixer
import pygame
import sys
import time
import os
pygame.display.init()
#this library should contain any functions and data needed by dezutezeoid
#that don't need to be in the actual engine executable
#some functions do rely on variables only present within DZU-ENG1.py however.
print ("dzulib initalized")
#inital main.sav file structure
savtree='''<?xml version="1.0" encoding="UTF-8"?>
<sav>
<keysav>
</keysav>
<plugsav>
</plugsav>
<pagelink/>
</sav>
'''
#main.sav init.
def initmainsave():
print ('Initalize main.sav')
mainsavfile = open(os.path.join("save", 'autosave.sav'), 'w')
mainsavfile.write(savtree)
mainsavfile.close()
def definepluginlist(pluglist):
global pluglistactive
pluglistactive=pluglist
#image scrollers
def vscroll(scrollval, image):
offs=image.get_height()
newimage=image.copy()
newimage.fill((0, 0, 0, 0))
newimage.blit(image, (0, scrollval))
if (str(scrollval))[0]=="-":
newimage.blit(image, (0, (scrollval + offs)))
else:
newimage.blit(image, (0, (scrollval - offs)))
return newimage
def hscroll(scrollval, image):
offs=image.get_width()
newimage=image.copy()
newimage.fill((0, 0, 0, 0))
newimage.blit(image, (scrollval, 0))
if (str(scrollval))[0]=="-":
newimage.blit(image, ((scrollval + offs), 0))
else:
newimage.blit(image, ((scrollval - offs), 0))
return newimage
imagepath='img'
filedict={}
textdict={}
#image alpha optimization detection.
def imagealphaoff(filename):
if (filename.lower()).endswith(".jpg") or (filename.lower()).endswith(".jpeg") or (filename.lower()).startswith("no-tr"):
return 1
else:
return 0
dummyimage=pygame.Surface((48, 48))
dummyimage.fill((255, 0, 255))
def filelookup(filename, bypasscache=False,):
global filedict
if filename in filedict and bypasscache==False:
return filedict[filename]
else:
try:
if imagealphaoff(filename):
imgret=pygame.image.load(os.path.join(imagepath, filename)).convert()
#print "noalpha"
else:
imgret=pygame.image.load(os.path.join(imagepath, filename)).convert_alpha()
#print "alpha"
filedict[filename]=imgret
return imgret
except pygame.error:
#if error, check if any plugin's imageloader API functions understand it.
for plug in pluglistactive:
#plugin imageloader API.
try:
imgret=plug.imageloader(filename)
if imgret!=None:
#cache result of imageloader call.
filedict[filename]=imgret
return imgret
except AttributeError:
continue
#nocache variant.
try:
#nocache version of imageloader.
imgret=plug.imageloader_nocache(filename)
if imgret!=None:
return imgret
except AttributeError:
continue
#if all else fails print error message and return dummy image.
print("IMAGE FILENAME ERROR: nonvalid image filename. returning dummy image...")
return dummyimage
#convenience function.
#give it a color, be it an RGB tuple,
# html hex, or pygame color object, and it will always spit out a pygame color object.
def colorify(colorobj):
if type(colorobj) is pygame.Color:
return colorobj
else:
return pygame.Color(colorobj)
def textrender(text, size, fgcolor, bgcolor, transp):
#ensure colors are pygame.Color objects
fgcolor=colorify(fgcolor)
bgcolor=colorify(bgcolor)
#generate string forms of fg and bg colors for key.
kfgcolor=str(fgcolor.r)+str(fgcolor.g)+str(fgcolor.b)
kbgcolor=str(bgcolor.r)+str(bgcolor.g)+str(bgcolor.b)
global textdict
keyx=(text + str(size) + kfgcolor + kbgcolor + str(transp))
if keyx in textdict:
return textdict[keyx]
else:
texfnt=pygame.font.SysFont(None, size)
if transp==0:
texgfx=texfnt.render(text, True, fgcolor, bgcolor)
else:
texgfx=texfnt.render(text, True, fgcolor)
textdict[keyx]=texgfx
return texgfx
class clicktab:
def __init__(self, box, reftype, ref, keyid, takekey, sfxclick, sound, quitab=0, data=None):
self.box=box
self.ref=ref
self.keyid=keyid
self.takekey=takekey
self.reftype=reftype
self.sfxclick=sfxclick
self.sound=sound
self.quitab=quitab
self.data=data
def ctreport(box, selfref, dataval):
return clicktab(box, 'report', selfref, '0', '0', 0, None, quitab=0, data=dataval)
def colorboost(colorobj, amnt):
colorobj=colorify(colorobj)
rcol=colorobj.r
gcol=colorobj.g
bcol=colorobj.b
rcol+=amnt
gcol+=amnt
bcol+=amnt
if rcol>255:
rcol=255
if rcol<0:
rcol=0
if gcol>255:
gcol=255
if gcol<0:
gcol=0
if bcol>255:
bcol=255
if bcol<0:
bcol=0
return pygame.Color(rcol, gcol, bcol)
def trace3dbox(surface, basecolor, rect, linewidth=1):
basetint=colorboost(basecolor, 40)
baseshad=colorboost(basecolor, -40)
pygame.draw.line(surface, basetint, rect.topleft, rect.topright, linewidth)
pygame.draw.line(surface, basetint, rect.topleft, rect.bottomleft, linewidth)
pygame.draw.line(surface, baseshad, rect.bottomleft, rect.bottomright, linewidth)
pygame.draw.line(surface, baseshad, rect.topright, rect.bottomright, linewidth)
def colorchanlimit(color):
if color>255:
return 255
elif color<0:
return 0
else:
return color
#non-alpha 2-color gradient function. outputs a 200x200 surface, use rotval 0 for no rotation.
#rotation values of non-90-degree increments will cause the returned surface to be LARGER than 200x200.
def makegradient(startcolor, endcolor, rotval):
#print startcolor
#print endcolor
gradsurf = pygame.Surface((200, 200))
startcolor = colorify(startcolor)
endcolor = colorify(endcolor)
#calculate float increment values for each color channel
inccolorR = (startcolor.r - endcolor.r) / 200.0
inccolorG = (startcolor.g - endcolor.g) / 200.0
inccolorB = (startcolor.b - endcolor.b) / 200.0
#initalize float color data storage values
startcolorR = startcolor.r
startcolorG = startcolor.g
startcolorB = startcolor.b
colcnt = 0
#draw gradient
while colcnt < 200:
#draw horizontal line
pygame.draw.line(gradsurf, startcolor, (0, colcnt), (200, colcnt))
startcolorR -= inccolorR
startcolorG -= inccolorG
startcolorB -= inccolorB
#update color channels
startcolor.r = colorchanlimit(int(startcolorR))
startcolor.g = colorchanlimit(int(startcolorG))
startcolor.b = colorchanlimit(int(startcolorB))
colcnt += 1
if rotval==0:
return gradsurf
else:
return pygame.transform.rotate(gradsurf, rotval)
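# Informal example: makegradient("#ff0000", "#0000ff", 0) returns a 200x200
# surface fading from red at the top to blue at the bottom; a rotval of 90
# turns it into a horizontal gradient, and non-right-angle rotations enlarge
# the returned surface as noted above.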
|
gpl-3.0
| 7,944,259,049,090,231,000
| 26.316239
| 122
| 0.728723
| false
| 2.861235
| false
| false
| false
|
timmo/gardeningPI
|
gists/integration.py
|
1
|
20203
|
import math
import requests
from requests.exceptions import *
import json
import astral
import datetime
import hashlib
from enum import Enum, unique
from kivy.clock import Clock
from kivy.logger import Logger
# Using I2C on RasPi for reading a light sensor
import platform
if 'arm' in platform.uname().machine:
import smbus # used for I2C connection to TSL2516 lux sensor
class ParsingException(Exception):
pass
@unique
class WeatherCondition(Enum):
clear = 1
cloudy = 2
drizzle = 3
rain = 4
heavy_rain = 5
hail = 6
snow = 7
heavy_snow = 8
fog = 9
wind = 10
thunderstorm = 11
tornado = 12
# TODO: load credentials from external file?
class IntegrationBase:
def __init__(self):
super().__init__()
self.refresh_data_time = 900 # every 15 minutes
Clock.schedule_interval(self.refresh, self.refresh_data_time)
def refresh(self, dt):
pass
class NetatmoIntegration(IntegrationBase):
_baseUrl = "https://api.netatmo.net/"
def __init__(self, client_id, client_secret, username, password):
super().__init__()
# TODO: load credentials from external file?
self.clientId = client_id
self.clientSecret = client_secret
self.username = username
self.password = password
self.access_token = None
self.refresh_token = None
self.refresh_access_token_time = -1
self.retry_authentication_time = 60 # every minute
self.wifiStatus = None
self.calibratingCo2 = False
self.name = "Anonymous"
self.position = astral.Location()
self.inside = {
'temperature': {
'current': "88.8"
},
'humidity': 100,
'co2': 8888.8
}
self.outside = {
'temperature': {
'min': -25.0,
'current': 38.8,
'max': 45.0
},
'battery': 100,
'connection': 100
}
self.rain = {
'rain': {
'hour': 88.8,
'day': 88.8
},
'battery': 100,
'connection': 100
}
self.alarm = []
self.locale = ''
Clock.schedule_once(self.authenticate)
def authenticate(self, dt):
Logger.debug('Netatmo: Starting authentication')
try:
params = {
"grant_type": "password",
"client_id": self.clientId,
"client_secret": self.clientSecret,
"username": self.username,
"password": self.password,
"scope": "read_station"
}
# REQUEST
response = requests.post(NetatmoIntegration._baseUrl + "oauth2/token", data=params).json()
#
# TODO: Check response
except RequestException as rex:
# Authentication failed
Logger.debug('Netatmo: Failed to authenticate')
Logger.exception(str(rex))
Clock.schedule_once(self.authenticate, self.retry_authentication_time) # TODO only for network related errors
return
self.access_token = response['access_token']
self.refresh_token = response['refresh_token']
self.refresh_access_token_time = response['expires_in']
Clock.schedule_once(self.refresh_access_token, self.refresh_access_token_time / 2)
Logger.debug('Netatmo: authentication successful')
def refresh_access_token(self, dt):
Logger.debug('Netatmo: Starting refresh of access token')
try:
# Refresh access token
params = {
"grant_type": "refresh_token",
"refresh_token": self.refresh_token,
"client_id": self.clientId,
"client_secret": self.clientSecret
}
response = requests.post(NetatmoIntegration._baseUrl + "oauth2/token", data=params).json()
# TODO: Check response
except RequestException as rex:
Logger.debug('Netatmo: Failed to refresh access token')
Logger.exception(str(rex))
Clock.schedule_once(self.authenticate,
self.retry_authentication_time) # TODO only for authentication related errors
return
self.refresh_token = response['refresh_token']
self.refresh_access_token_time = response['expires_in']
Clock.schedule_once(self.refresh_access_token, self.refresh_access_token_time / 2)
Logger.debug('Netatmo: Access token refreshed successfully')
def refresh(self, dt):
super().refresh(dt)
Logger.debug('Netatmo: Starting data refresh')
# Load data from netatmo portal
try:
# Read weather station
params = {
"access_token": self.access_token
}
response = requests.post(NetatmoIntegration._baseUrl + "api/getstationsdata", data=params)
#print(json.dumps(response.json(), sort_keys=True, indent=4, separators=(',', ': ')))
except RequestException as rex:
Logger.debug('Netatmo: Failed to refresh data')
Logger.exception(str(rex))
Logger.debug(str(response.content))
return
# Parse Response
try:
# TODO identify errors like
# {
# "error": {
# "code": 500,
# "message": "Internal Server Error"
# }
# }
# This is the station's locale string for displaying values
self.locale = response.json()['body']['user']['administrative']['reg_locale'].replace('-', '_')
station = response.json()['body']['devices'][0]
self.name = station['station_name']
self.wifiStatus = station['wifi_status']
self.calibratingCo2 = station['co2_calibrating']
self.position.name = self.name
self.position.region = station['place']['city']
self.position.latitude = station['place']['location'][1]
self.position.longitude = station['place']['location'][0]
self.position.timezone = station['place']['timezone']
self.position.elevation = 0
Logger.debug("Netatmo: Location is {} ({}, {}); timezone: {}".format(
str(self.position.region), str(self.position.latitude), str(self.position.longitude),
str(self.position.timezone)))
# Inside module
data = station['dashboard_data']
self.inside['temperature'] = {
'current': data['Temperature'],
'min': data['min_temp'],
'max': data['max_temp'],
'trend': data['temp_trend'] if 'temp_trend' in data else 0
}
self.inside['co2'] = data['CO2']
self.inside['humidity'] = data['Humidity']
self.inside['pressure'] = {
'current': data['Pressure'],
'trend': data['pressure_trend']
}
self.inside['noise'] = data['Noise']
# TODO: find a better way of identifying the modules (sequence depends on configuration)
# outside module
data = station['modules'][1]
self.outside['battery'] = data['battery_percent']
self.outside['connection'] = data['rf_status']
data = station['modules'][1]['dashboard_data']
self.outside['temperature'] = {
'current': data['Temperature'],
'min': data['min_temp'],
'max': data['max_temp'],
'trend': data['temp_trend'] if 'temp_trend' in data else 0
}
self.outside['humidity'] = data['Humidity']
# rain module
data = station['modules'][0]
self.rain['battery'] = data['battery_percent']
self.rain['connection'] = data['rf_status']
data = station['modules'][0]['dashboard_data']
self.rain['rain'] = {
'hour': data['sum_rain_1'],
'day': data['sum_rain_24'] if 'sum_rain_24' in data else 0
}
# alarms
if 'meteo_alarms' in station:
for alarm in station['meteo_alarms']:
self.alarm.append({
'type': alarm['type'],
'level': alarm['level'],
'description': alarm['descr'][13:]
})
Logger.debug('Netatmo: Data refresh successful')
except (KeyError, ValueError) as err:
Logger.debug('Netatmo: Failed to parse json')
Logger.exception(str(err))
Logger.debug(str(response.content))
class OpenWeatherMapIntegration(IntegrationBase):
_baseUrl = "http://api.openweathermap.org/data/2.5/"
_iconUrl = "http://openweathermap.org/img/w/"
def __init__(self, position, app_id):
super().__init__()
self.position = position
self.appId = app_id
self.forecast = []
# Converts OWM weather id to common weather condition
def _convert_weather_id(self, weather_id):
if 200 <= weather_id <= 299:
return WeatherCondition.thunderstorm
if 300 <= weather_id <= 399:
return WeatherCondition.drizzle
# 400 range does not exist?
if 500 == weather_id:
return WeatherCondition.drizzle
if 501 == weather_id:
return WeatherCondition.rain
if 502 <= weather_id <= 599:
return WeatherCondition.heavy_rain
if 600 <= weather_id <= 601:
return WeatherCondition.snow
if 602 <= weather_id <= 699:
return WeatherCondition.heavy_snow
if 700 <= weather_id <= 780:
return WeatherCondition.fog
if weather_id == 781:
return WeatherCondition.tornado
# Clear Sky
if weather_id == 800:
return WeatherCondition.clear
# Clouds
if 801 <= weather_id <= 804:
return WeatherCondition.cloudy
if 900 <= weather_id <= 902:
return WeatherCondition.tornado
if weather_id == 905 or 957 <= weather_id <= 962:
return WeatherCondition.wind
if weather_id == 906:
return WeatherCondition.hail
return None
def refresh(self, dt):
super().refresh(dt)
Logger.debug('OWM: Starting data refresh')
Logger.debug("OWM: using location {} ({}, {}); timezone: {}".format(
str(self.position.region), str(self.position.latitude), str(self.position.longitude),
str(self.position.timezone)))
try:
# Forecast (16 days)
params = {
"lat": self.position.latitude,
"lon": self.position.longitude,
"mode": "json",
"appid": self.appId,
"units": "metric",
"lang": "de",
"cnt": 10
}
            response = requests.get(OpenWeatherMapIntegration._baseUrl + "forecast/daily", params=params)
# print(json.dumps(response.json(), indent=2))
except RequestException as rex:
Logger.debug('OWM: Failed to refresh data')
Logger.exception(str(rex))
return
# Parse response
try:
for entry in response.json()['list']:
timestamp = datetime.datetime.fromtimestamp(entry['dt'])
self.forecast.append({
'time': timestamp,
'description': entry['weather'][0]['description'],
'icon': entry['weather'][0]['icon'],
'id': self._convert_weather_id(entry['weather'][0]['id']),
'temperature': {
"min": float(format(entry['temp']['min'], '.1f')),
"max": float(format(entry['temp']['max'], '.1f')),
},
'pressure': entry['pressure'],
'humidity': entry['humidity'],
'clouds': entry['clouds'] if 'clouds' in entry else 0,
'snow': entry['snow'] if 'snow' in entry else 0,
'rain': entry['rain'] if 'rain' in entry else 0
})
except KeyError as kerr:
Logger.debug('OWM: Failed to parse json')
Logger.exception(str(kerr))
Logger.debug(str(response.content))
Logger.debug('OWM: Data refresh successful')
# Only gives min/max temperature for today and next two days
class WetterComIntegration(IntegrationBase):
_baseUrl = "http://api.wetter.com/forecast/weather/city/{}/project/{}/cs/{}"
def __init__(self, city_code, project_name, api_key):
super().__init__()
        self.minimumTemperature = -25
        self.maximumTemperature = 45
        self.id = None
self._city_code = city_code
self._project_name = project_name
self._api_key = api_key
# Converts Wetter.com id to common weather condition
def _convert_weather_id(self, weather_id):
if weather_id == 0:
return WeatherCondition.clear
if weather_id in (1, 2, 3) or 10 <= weather_id <= 39:
return WeatherCondition.cloudy
if weather_id == 4 or 40 <= weather_id <= 49:
return WeatherCondition.fog
if weather_id in (5, 50, 51, 53, 56):
return WeatherCondition.drizzle
if weather_id in (6, 8, 60, 61, 63):
return WeatherCondition.rain
if weather_id in (55, 65, 80, 81, 82):
return WeatherCondition.heavy_rain
if weather_id in (57, 66, 67, 69, 83, 84):
return WeatherCondition.hail
if weather_id in (7, 68, 70, 71, 73, 85):
return WeatherCondition.snow
if weather_id in (75, 86):
return WeatherCondition.heavy_snow
if weather_id == 9 or 90 <= weather_id <= 99:
return WeatherCondition.thunderstorm
return None
def refresh(self, dt):
super().refresh(dt)
Logger.debug('Wetter.com: Starting data refresh')
# Read current weather from wetter.com
try:
params = {
"output": 'json'
}
checksum = hashlib.md5(self._project_name.encode('utf-8') + self._api_key.encode('utf-8') +
self._city_code.encode('utf-8')).hexdigest()
response = requests.get(WetterComIntegration._baseUrl.format(self._city_code, self._project_name, checksum),
                                    params=params)
# print(json.dumps(response.json(), sort_keys=True, indent=4, separators=(',', ': ')))
data = response.json()
        except (RequestException, ValueError, ConnectionError) as ex:
Logger.debug('Wetter.com: Failed to refresh data')
Logger.exception(str(ex))
if 'response' in locals():
msg = str(response.content)
else:
msg = ""
Logger.debug(msg)
return
# Parse response
try:
now = datetime.datetime.now()
for daystring, forecast in data['city']['forecast'].items():
day = datetime.datetime.strptime(daystring, '%Y-%m-%d')
if day.date() == now.date():
self.minimumTemperature = float(forecast['tn'])
self.maximumTemperature = float(forecast['tx'])
# TODO: take values from last day for range 00:00 .. 05:59
if 6 <= now.hour <= 10:
weather_id = forecast['06:00']['w']
elif 11 <= now.hour <= 16:
weather_id = forecast['11:00']['w']
elif 17 <= now.hour <= 22:
weather_id = forecast['17:00']['w']
else:
weather_id = forecast['23:00']['w']
self.id = self._convert_weather_id(int(weather_id))
break
else:
Logger.warning('Wetter.com: Unable to find date {} in forecast'.format(now.strftime('%Y-%m-%d')))
        except (KeyError, AttributeError) as err:
Logger.warning('Wetter.com: Unable to parse json')
Logger.debug('Wetter.com: \n' +
json.dumps(response.json(), sort_keys=True, indent=4, separators=(',', ': ')))
Logger.exception(str(err))
Logger.debug('Wetter.com: Data refresh successful')
Logger.debug('Wetter.com: Got id {}'.format(self.id))
# This is the new, improved version for brightness control, using a TSL2561 via I2C
class TSL2516BrightnessRegulation(IntegrationBase):
def __init__(self):
super().__init__()
self.bus = 1
self.address = 0x39
self.ambient_light = 0
self.infrared_light = 0
self.lux = 2.0
# Weight of latest lux measurement in overall brightness calculation. Used for slowing down changes in
# brightness. A value of 1.0 completely ignores the old lux value
        self.new_lux_weight = 0.05
self.brightness = 120
self.min_brightness = 15
self.max_brightness = 255
self.device = '/sys/class/backlight/rpi_backlight/brightness'
def refresh(self, dt):
super().refresh(dt)
        # Measure brightness via TSL2561 lux sensor on I2C bus 1
# see http://www.mogalla.net/201502/lichtsensor-tsl2561-am-raspberry (german)
# Code would benefit from a block read command. smbus-cffi 0.5.1 documentation mentions that block reads
# currently crash with a kernel panic on RasPi. Thus, reading single bytes.
# TODO: Try block reads
try:
bus = smbus.SMBus(self.bus)
bus.write_byte_data(self.address, 0x80, 0x03) # init measurement
lb = bus.read_byte_data(self.address, 0x8c)
hb = bus.read_byte_data(self.address, 0x8d)
self.ambient_light = (hb << 8) | lb
lb = bus.read_byte_data(self.address, 0x8e)
hb = bus.read_byte_data(self.address, 0x8f)
self.infrared_light = (hb << 8) + lb
except IOError as ex:
Logger.warning("Brightness: Problems using I2C bus ({}) ".format(str(ex)))
# TODO: some countermeasure? bus reset?
return
# Calculate Lux value (see example in TSL2561 datasheet)
if self.ambient_light == 0:
return # ratio would result in div by 0, avoid
ratio = self.infrared_light / float(self.ambient_light)
if 0 < ratio <= 0.50:
new_lux = 0.0304 * self.ambient_light - 0.062 * self.ambient_light * (ratio ** 1.4)
elif 0.50 < ratio <= 0.61:
new_lux = 0.0224 * self.ambient_light - 0.031 * self.infrared_light
elif 0.61 < ratio <= 0.80:
new_lux = 0.0128 * self.ambient_light - 0.0153 * self.infrared_light
elif 0.80 < ratio <= 1.3:
new_lux = 0.00146 * self.ambient_light - 0.00112 * self.infrared_light
else:
new_lux = 0
# Weighted average of old and current value
self.lux = (1.0 - self.new_lux_weight) * self.lux + self.new_lux_weight * new_lux
# Use a logarithmic function to map lux to brightness, clamp to min..max
new_brightness = max(self.min_brightness, min(round(math.log10(self.lux+1.5)*300.0), self.max_brightness))
# Write to device
if self.brightness != new_brightness:
            Logger.debug('Brightness: Setting to {} ({} lux avg, {} lux current)'.format(
                str(new_brightness), "%.2f" % self.lux, "%.2f" % new_lux))
self.brightness = new_brightness
with open(self.device, 'w') as d:
d.write(str(self.brightness))
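# Illustrative sketch (not part of the original module): a standalone check of
# the TSL2561 lux formula and the logarithmic brightness mapping used above.
# The ambient/infrared readings are made-up values; 15 and 255 mirror the
# class's default min/max brightness.
if __name__ == '__main__':
    import math
    ambient, infrared = 1200, 350
    ratio = infrared / float(ambient)
    if 0 < ratio <= 0.50:
        lux = 0.0304 * ambient - 0.062 * ambient * (ratio ** 1.4)
    elif 0.50 < ratio <= 0.61:
        lux = 0.0224 * ambient - 0.031 * infrared
    elif 0.61 < ratio <= 0.80:
        lux = 0.0128 * ambient - 0.0153 * infrared
    elif 0.80 < ratio <= 1.3:
        lux = 0.00146 * ambient - 0.00112 * infrared
    else:
        lux = 0
    brightness = max(15, min(round(math.log10(lux + 1.5) * 300.0), 255))
    print('lux={:.2f} -> brightness={}'.format(lux, brightness))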
|
apache-2.0
| -8,395,176,535,836,273,000
| 37.629063
| 121
| 0.542395
| false
| 4.127273
| false
| false
| false
|
KurtHaffner/Weather_Project
|
test.py
|
1
|
3352
|
from tkinter import *
from pygeocoder import Geocoder
import forecastio
def getForecast(latt, lngg, results):
#Set up the box that will hold all the results.
result = Tk()
result.title("Weather Results")
api_key = "d747bd6c3aa83c90ecc20dbbb019d5ea"
lat = latt
lng = lngg
forecast = forecastio.load_forecast(api_key, lat, lng)
#Get the daily forecast and print a nice summary.
byday = forecast.daily()
#Add labels with the location and summary.
something = "The weather for {0}, {1}.".format(results[0].city,results[0].state)
Label(result, text=something).grid(row=0)
Label(result, text="").grid(row=1)
Label(result, text=byday.summary).grid(row=2)
#Get the current data in a datapoint.
current = forecast.currently()
#Set up variables for all the data needed.
temp = current.temperature
feelsTemp = current.apparentTemperature
humidity = current.humidity
prob = current.precipProbability * 100
dis = current.nearestStormDistance
intensity = current.precipIntensity
#Print the temperature and feels like temp with humidity.
something = "The current temperature is {0} degrees fahrenheit.".format(temp)
something1 = "The temperature feels like {0} degrees fahrenheit.".format(feelsTemp)
something2 = "The current humidity is {0}%.".format(humidity*100)
#Add labels for temperature and feels like temp with humidity.
Label(result, text="").grid(row=3)
Label(result, text=something).grid(row=4)
Label(result, text=something1).grid(row=5)
Label(result, text=something2).grid(row=6)
#Print the hourly summary.
byHour = forecast.hourly()
#Add hourly summary.
Label(result, text="").grid(row=7)
Label(result, text=byHour.summary).grid(row=8)
#Print the storm and rain/snow information.
    something = "The probability of precipitation right now is {0}%.".format(prob)
something1 = "The nearest storm is {0} miles away.".format(dis)
#Add the storm and rain/snow info.
Label(result, text="").grid(row=9)
Label(result, text=something).grid(row=10)
Label(result, text=something1).grid(row=11)
#Check to see if the probability is high enough to print storm info.
if prob >= 50.0:
typePrec = current.precipType
something = "The precipitation intensity is {0} inches an hour.".format(intensity)
something1 = "The type of precipitation is {0}.".format(typePrec)
#Add to the window.
Label(result, text="").grid(row=12)
Label(result, text=something).grid(row=13)
Label(result, text=something1).grid(row=14)
return
def do_stuff():
#Put the input into a geocoder object.
results = Geocoder.geocode(e1.get())
#Call the getForecast function with lat and long.
getForecast(results[0].latitude,results[0].longitude, results)
#End the GUI.
master.destroy()
#Set up the prompt for finding the lat and long.
master = Tk()
master.title("Weather Widget")
Label(master, text="Please enter an address, city or zip code").grid(row=0)
e1 = Entry(master)
e1.grid(row=1, column=0)
Button(master, text="Get Weather", command=do_stuff).grid(row=2, column=0, sticky=W, pady=4)
Button(master, text="Quit", command=master.destroy).grid(row=2, column=1, sticky=W, pady=4)
mainloop()
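# Illustrative sketch (not part of the original script): the same forecastio
# calls without the Tkinter front end. The API key and coordinates below are
# placeholders.
#
#   import forecastio
#   forecast = forecastio.load_forecast("YOUR_API_KEY", 45.523, -122.676)
#   print(forecast.daily().summary)          # daily outlook text
#   print(forecast.currently().temperature)  # current temperature in fahrenheit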
|
apache-2.0
| 5,965,499,589,496,558,000
| 30.92381
| 92
| 0.686158
| false
| 3.365462
| false
| false
| false
|
shankari/folium
|
folium/map.py
|
1
|
29178
|
# -*- coding: utf-8 -*-
"""
Map
------
Classes for drawing maps.
"""
from __future__ import unicode_literals
import json
from collections import OrderedDict
from jinja2 import Environment, PackageLoader, Template
from branca.six import text_type, binary_type
from branca.utilities import _parse_size
from branca.element import (Element, Figure, MacroElement, Html,
JavascriptLink, CssLink)
ENV = Environment(loader=PackageLoader('folium', 'templates'))
_default_js = [
('leaflet',
'https://unpkg.com/leaflet@1.0.1/dist/leaflet.js'),
('jquery',
'https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js'),
('bootstrap',
'https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js'),
('awesome_markers',
'https://cdnjs.cloudflare.com/ajax/libs/Leaflet.awesome-markers/2.0.2/leaflet.awesome-markers.js'), # noqa
('marker_cluster_src',
'https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/leaflet.markercluster-src.js'), # noqa
('marker_cluster',
'https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/leaflet.markercluster.js'), # noqa
]
_default_css = [
('leaflet_css',
'https://unpkg.com/leaflet@1.0.1/dist/leaflet.css'),
('bootstrap_css',
'https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css'),
('bootstrap_theme_css',
'https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap-theme.min.css'), # noqa
('awesome_markers_font_css',
'https://maxcdn.bootstrapcdn.com/font-awesome/4.6.3/css/font-awesome.min.css'), # noqa
('awesome_markers_css',
'https://cdnjs.cloudflare.com/ajax/libs/Leaflet.awesome-markers/2.0.2/leaflet.awesome-markers.css'), # noqa
('marker_cluster_default_css',
'https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/MarkerCluster.Default.css'), # noqa
('marker_cluster_css',
'https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/1.0.0/MarkerCluster.css'), # noqa
('awesome_rotate_css',
'https://rawgit.com/python-visualization/folium/master/folium/templates/leaflet.awesome.rotate.css'), # noqa
]
class LegacyMap(MacroElement):
"""Create a Map with Folium and Leaflet.js
Generate a base map of given width and height with either default
tilesets or a custom tileset URL. The following tilesets are built-in
to Folium. Pass any of the following to the "tiles" keyword:
- "OpenStreetMap"
- "Mapbox Bright" (Limited levels of zoom for free tiles)
- "Mapbox Control Room" (Limited levels of zoom for free tiles)
- "Stamen" (Terrain, Toner, and Watercolor)
- "Cloudmade" (Must pass API key)
- "Mapbox" (Must pass API key)
- "CartoDB" (positron and dark_matter)
You can pass a custom tileset to Folium by passing a Leaflet-style
URL to the tiles parameter:
http://{s}.yourtiles.com/{z}/{x}/{y}.png
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Map (Northing, Easting).
width: pixel int or percentage string (default: '100%')
Width of the map.
height: pixel int or percentage string (default: '100%')
Height of the map.
tiles: str, default 'OpenStreetMap'
Map tileset to use. Can choose from a list of built-in tiles,
pass a custom URL or pass `None` to create a map without tiles.
API_key: str, default None
API key for Cloudmade or Mapbox tiles.
max_zoom: int, default 18
Maximum zoom depth for the map.
zoom_start: int, default 10
Initial zoom level for the map.
attr: string, default None
Map tile attribution; only required if passing custom tile URL.
detect_retina: bool, default False
If true and user is on a retina display, it will request four
tiles of half the specified size and a bigger zoom level in place
of one to utilize the high resolution.
crs : str, default 'EPSG3857'
Defines coordinate reference systems for projecting geographical points
into pixel (screen) coordinates and back.
You can use Leaflet's values :
* EPSG3857 : The most common CRS for online maps, used by almost all
free and commercial tile providers. Uses Spherical Mercator projection.
          Set by default in Map's crs option.
* EPSG4326 : A common CRS among GIS enthusiasts.
Uses simple Equirectangular projection.
* EPSG3395 : Rarely used by some commercial tile providers.
Uses Elliptical Mercator projection.
* Simple : A simple CRS that maps longitude and latitude into
x and y directly. May be used for maps of flat surfaces
(e.g. game maps). Note that the y axis should still be inverted
(going from bottom to top).
control_scale : bool, default False
Whether to add a control scale on the map.
prefer_canvas : bool, default False
Forces Leaflet to use the Canvas back-end (if available) for
vector layers instead of SVG. This can increase performance
considerably in some cases (e.g. many thousands of circle
markers on the map).
no_touch : bool, default False
Forces Leaflet to not use touch events even if it detects them.
disable_3d : bool, default False
Forces Leaflet to not use hardware-accelerated CSS 3D
transforms for positioning (which may cause glitches in some
rare environments) even if they're supported.
Returns
-------
Folium LegacyMap Object
Examples
--------
>>> map = folium.LegacyMap(location=[45.523, -122.675],
... width=750, height=500)
>>> map = folium.LegacyMap(location=[45.523, -122.675],
... tiles='Mapbox Control Room')
>>> map = folium.LegacyMap(location=(45.523, -122.675), max_zoom=20,
... tiles='Cloudmade', API_key='YourKey')
>>> map = folium.LegacyMap(location=[45.523, -122.675], zoom_start=2,
... tiles=('http://{s}.tiles.mapbox.com/v3/'
... 'mapbox.control-room/{z}/{x}/{y}.png'),
... attr='Mapbox attribution')
"""
def __init__(self, location=None, width='100%', height='100%',
left="0%", top="0%", position='relative',
tiles='OpenStreetMap', API_key=None, max_zoom=18, min_zoom=1,
zoom_start=10, continuous_world=False, world_copy_jump=False,
no_wrap=False, attr=None, min_lat=-90, max_lat=90,
min_lon=-180, max_lon=180, max_bounds=True,
detect_retina=False, crs='EPSG3857', control_scale=False,
prefer_canvas=False, no_touch=False, disable_3d=False):
super(LegacyMap, self).__init__()
self._name = 'Map'
self._env = ENV
if not location:
# If location is not passed we center and ignore zoom.
self.location = [0, 0]
self.zoom_start = min_zoom
else:
self.location = location
self.zoom_start = zoom_start
Figure().add_child(self)
# Map Size Parameters.
self.width = _parse_size(width)
self.height = _parse_size(height)
self.left = _parse_size(left)
self.top = _parse_size(top)
self.position = position
self.min_lat = min_lat
self.max_lat = max_lat
self.min_lon = min_lon
self.max_lon = max_lon
self.max_bounds = max_bounds
self.continuous_world = continuous_world
self.no_wrap = no_wrap
self.world_copy_jump = world_copy_jump
self.crs = crs
self.control_scale = control_scale
self.global_switches = GlobalSwitches(prefer_canvas, no_touch, disable_3d)
if tiles:
self.add_tile_layer(
tiles=tiles, min_zoom=min_zoom, max_zoom=max_zoom,
continuous_world=continuous_world, no_wrap=no_wrap, attr=attr,
API_key=API_key, detect_retina=detect_retina
)
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
position : {{this.position}};
width : {{this.width[0]}}{{this.width[1]}};
height: {{this.height[0]}}{{this.height[1]}};
left: {{this.left[0]}}{{this.left[1]}};
top: {{this.top[0]}}{{this.top[1]}};
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<div class="folium-map" id="{{this.get_name()}}" ></div>
{% endmacro %}
{% macro script(this, kwargs) %}
{% if this.max_bounds %}
var southWest = L.latLng({{ this.min_lat }}, {{ this.min_lon }});
var northEast = L.latLng({{ this.max_lat }}, {{ this.max_lon }});
var bounds = L.latLngBounds(southWest, northEast);
{% else %}
var bounds = null;
{% endif %}
var {{this.get_name()}} = L.map(
'{{this.get_name()}}',
{center: [{{this.location[0]}},{{this.location[1]}}],
zoom: {{this.zoom_start}},
maxBounds: bounds,
layers: [],
worldCopyJump: {{this.world_copy_jump.__str__().lower()}},
crs: L.CRS.{{this.crs}}
});
{% if this.control_scale %}L.control.scale().addTo({{this.get_name()}});{% endif %}
{% endmacro %}
""") # noqa
def _repr_html_(self, **kwargs):
"""Displays the Map in a Jupyter notebook.
"""
if self._parent is None:
self.add_to(Figure())
out = self._parent._repr_html_(**kwargs)
self._parent = None
else:
out = self._parent._repr_html_(**kwargs)
return out
def add_tile_layer(self, tiles='OpenStreetMap', name=None,
API_key=None, max_zoom=18, min_zoom=1,
continuous_world=False, attr=None, active=False,
detect_retina=False, no_wrap=False, **kwargs):
"""
Add a tile layer to the map. See TileLayer for options.
"""
tile_layer = TileLayer(tiles=tiles, name=name,
min_zoom=min_zoom, max_zoom=max_zoom,
attr=attr, API_key=API_key,
detect_retina=detect_retina,
continuous_world=continuous_world,
no_wrap=no_wrap)
self.add_child(tile_layer, name=tile_layer.tile_name)
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
# Set global switches
figure.header.add_child(self.global_switches, name='global_switches')
# Import Javascripts
for name, url in _default_js:
figure.header.add_child(JavascriptLink(url), name=name)
# Import Css
for name, url in _default_css:
figure.header.add_child(CssLink(url), name=name)
figure.header.add_child(Element(
'<style>html, body {'
'width: 100%;'
'height: 100%;'
'margin: 0;'
'padding: 0;'
'}'
'</style>'), name='css_style')
figure.header.add_child(Element(
'<style>#map {'
'position:absolute;'
'top:0;'
'bottom:0;'
'right:0;'
'left:0;'
'}'
'</style>'), name='map_style')
super(LegacyMap, self).render(**kwargs)
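# Illustrative usage sketch (not part of folium itself): render a LegacyMap to
# an HTML string. The Figure created in __init__ is the root, so get_root()
# returns an object whose render() produces the page; the coordinates repeat
# the Portland example from the docstring above.
#
#   m = LegacyMap(location=[45.523, -122.675], zoom_start=10)
#   html = m.get_root().render()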
class GlobalSwitches(Element):
def __init__(self, prefer_canvas=False, no_touch=False, disable_3d=False):
super(GlobalSwitches, self).__init__()
self._name = 'GlobalSwitches'
self.prefer_canvas = prefer_canvas
self.no_touch = no_touch
self.disable_3d = disable_3d
self._template = Template(
'<script>'
'L_PREFER_CANVAS = {% if this.prefer_canvas %}true{% else %}false{% endif %}; '
'L_NO_TOUCH = {% if this.no_touch %}true{% else %}false{% endif %}; '
'L_DISABLE_3D = {% if this.disable_3d %}true{% else %}false{% endif %};'
'</script>'
)
class Layer(MacroElement):
"""An abstract class for everything that is a Layer on the map.
It will be used to define whether an object will be included in
LayerControls.
Parameters
----------
name : string, default None
The name of the Layer, as it will appear in LayerControls
overlay : bool, default False
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
"""
def __init__(self, name=None, overlay=False, control=True):
super(Layer, self).__init__()
self.layer_name = name if name is not None else self.get_name()
self.overlay = overlay
self.control = control
class TileLayer(Layer):
"""Create a tile layer to append on a Map.
Parameters
----------
tiles: str, default 'OpenStreetMap'
Map tileset to use. Can choose from this list of built-in tiles:
- "OpenStreetMap"
- "Mapbox Bright" (Limited levels of zoom for free tiles)
- "Mapbox Control Room" (Limited levels of zoom for free tiles)
- "Stamen" (Terrain, Toner, and Watercolor)
- "Cloudmade" (Must pass API key)
- "Mapbox" (Must pass API key)
- "CartoDB" (positron and dark_matter)
You can pass a custom tileset to Folium by passing a Leaflet-style
URL to the tiles parameter:
http://{s}.yourtiles.com/{z}/{x}/{y}.png
min_zoom: int, default 1
Minimal zoom for which the layer will be displayed.
max_zoom: int, default 18
Maximal zoom for which the layer will be displayed.
attr: string, default None
Map tile attribution; only required if passing custom tile URL.
API_key: str, default None
API key for Cloudmade or Mapbox tiles.
detect_retina: bool, default False
If true and user is on a retina display, it will request four
tiles of half the specified size and a bigger zoom level in place
of one to utilize the high resolution.
name : string, default None
The name of the Layer, as it will appear in LayerControls
overlay : bool, default False
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
"""
def __init__(self, tiles='OpenStreetMap', min_zoom=1, max_zoom=18,
attr=None, API_key=None, detect_retina=False,
continuous_world=False, name=None, overlay=False,
control=True, no_wrap=False):
self.tile_name = (name if name is not None else
''.join(tiles.lower().strip().split()))
super(TileLayer, self).__init__(name=self.tile_name, overlay=overlay,
control=control)
self._name = 'TileLayer'
self._env = ENV
self.min_zoom = min_zoom
self.max_zoom = max_zoom
self.no_wrap = no_wrap
self.continuous_world = continuous_world
self.detect_retina = detect_retina
self.tiles = ''.join(tiles.lower().strip().split())
if self.tiles in ('cloudmade', 'mapbox') and not API_key:
raise ValueError('You must pass an API key if using Cloudmade'
' or non-default Mapbox tiles.')
templates = list(self._env.list_templates(
filter_func=lambda x: x.startswith('tiles/')))
tile_template = 'tiles/'+self.tiles+'/tiles.txt'
attr_template = 'tiles/'+self.tiles+'/attr.txt'
if tile_template in templates and attr_template in templates:
self.tiles = self._env.get_template(tile_template).render(API_key=API_key) # noqa
self.attr = self._env.get_template(attr_template).render()
else:
self.tiles = tiles
if not attr:
raise ValueError('Custom tiles must'
' also be passed an attribution.')
if isinstance(attr, binary_type):
attr = text_type(attr, 'utf8')
self.attr = attr
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer(
'{{this.tiles}}',
{
maxZoom: {{this.max_zoom}},
minZoom: {{this.min_zoom}},
continuousWorld: {{this.continuous_world.__str__().lower()}},
noWrap: {{this.no_wrap.__str__().lower()}},
attribution: '{{this.attr}}',
detectRetina: {{this.detect_retina.__str__().lower()}}
}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""") # noqa
class FeatureGroup(Layer):
"""
Create a FeatureGroup layer ; you can put things in it and handle them
as a single layer. For example, you can add a LayerControl to
tick/untick the whole group.
Parameters
----------
name : str, default None
The name of the featureGroup layer.
It will be displayed in the LayerControl.
If None get_name() will be called to get the technical (ugly) name.
overlay : bool, default True
Whether your layer will be an overlay (ticked with a check box in
LayerControls) or a base layer (ticked with a radio button).
"""
def __init__(self, name=None, overlay=True, control=True):
super(FeatureGroup, self).__init__(overlay=overlay, control=control, name=name) # noqa
self._name = 'FeatureGroup'
self.tile_name = name if name is not None else self.get_name()
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.featureGroup(
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
class LayerControl(MacroElement):
"""
Creates a LayerControl object to be added on a folium map.
Parameters
----------
position : str
The position of the control (one of the map corners), can be
'topleft', 'topright', 'bottomleft' or 'bottomright'
default: 'topright'
collapsed : boolean
If true the control will be collapsed into an icon and expanded on
mouse hover or touch.
default: True
autoZIndex : boolean
If true the control assigns zIndexes in increasing order to all of
its layers so that the order is preserved when switching them on/off.
default: True
"""
def __init__(self, position='topright', collapsed=True, autoZIndex=True):
super(LayerControl, self).__init__()
self._name = 'LayerControl'
self.position = position
self.collapsed = str(collapsed).lower()
self.autoZIndex = str(autoZIndex).lower()
self.base_layers = OrderedDict()
self.overlays = OrderedDict()
self._template = Template("""
{% macro script(this,kwargs) %}
var {{this.get_name()}} = {
base_layers : { {% for key,val in this.base_layers.items() %}"{{key}}" : {{val}},{% endfor %} },
overlays : { {% for key,val in this.overlays.items() %}"{{key}}" : {{val}},{% endfor %} }
};
L.control.layers(
{{this.get_name()}}.base_layers,
{{this.get_name()}}.overlays,
{position: '{{this.position}}',
collapsed: {{this.collapsed}},
autoZIndex: {{this.autoZIndex}}
}).addTo({{this._parent.get_name()}});
{% endmacro %}
""") # noqa
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
# We select all Layers for which (control and not overlay).
self.base_layers = OrderedDict(
[(val.layer_name, val.get_name()) for key, val in
self._parent._children.items() if isinstance(val, Layer) and
(not hasattr(val, 'overlay') or not val.overlay) and
(not hasattr(val, 'control') or val.control)])
# We select all Layers for which (control and overlay).
self.overlays = OrderedDict(
[(val.layer_name, val.get_name()) for key, val in
self._parent._children.items() if isinstance(val, Layer) and
(hasattr(val, 'overlay') and val.overlay) and
(not hasattr(val, 'control') or val.control)])
super(LayerControl, self).render()
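# Illustrative usage sketch (not part of folium itself): a FeatureGroup and a
# LayerControl combine so the group can be toggled from the rendered map. The
# group name and marker location are made up; Marker is defined further down
# in this module.
#
#   m = LegacyMap(location=[45.5, -122.6])
#   fg = FeatureGroup(name='markers')
#   fg.add_child(Marker([45.52, -122.68], popup='Portland, OR'))
#   m.add_child(fg)
#   m.add_child(LayerControl())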
class Icon(MacroElement):
"""
Creates an Icon object that will be rendered
using Leaflet.awesome-markers.
Parameters
----------
color : str, default 'blue'
The color of the marker. You can use:
['red', 'blue', 'green', 'purple', 'orange', 'darkred',
'lightred', 'beige', 'darkblue', 'darkgreen', 'cadetblue',
'darkpurple', 'white', 'pink', 'lightblue', 'lightgreen',
'gray', 'black', 'lightgray']
icon_color : str, default 'white'
The color of the drawing on the marker. You can use colors above,
or an html color code.
icon : str, default 'info-sign'
The name of the marker sign.
See Font-Awesome website to choose yours.
Warning : depending on the icon you choose you may need to adapt
the `prefix` as well.
angle : int, default 0
The icon will be rotated by this amount of degrees.
prefix : str, default 'glyphicon'
The prefix states the source of the icon. 'fa' for font-awesome or
'glyphicon' for bootstrap 3.
For more details see:
https://github.com/lvoogdt/Leaflet.awesome-markers
"""
def __init__(self, color='blue', icon_color='white', icon='info-sign',
angle=0, prefix='glyphicon'):
super(Icon, self).__init__()
self._name = 'Icon'
self.color = color
self.icon = icon
self.icon_color = icon_color
self.angle = angle
self.prefix = prefix
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.AwesomeMarkers.icon({
icon: '{{this.icon}}',
iconColor: '{{this.icon_color}}',
markerColor: '{{this.color}}',
prefix: '{{this.prefix}}',
extraClasses: 'fa-rotate-{{this.angle}}'
});
{{this._parent.get_name()}}.setIcon({{this.get_name()}});
{% endmacro %}
""")
class Marker(MacroElement):
"""Create a simple stock Leaflet marker on the map, with optional
popup text or Vincent visualization.
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Marker (Northing, Easting)
popup: string or folium.Popup, default None
Input text or visualization for object.
icon: Icon plugin
the Icon plugin to use to render the marker.
Returns
-------
Marker names and HTML in obj.template_vars
Examples
--------
>>> Marker(location=[45.5, -122.3], popup='Portland, OR')
>>> Marker(location=[45.5, -122.3], popup=folium.Popup('Portland, OR'))
"""
def __init__(self, location, popup=None, icon=None):
super(Marker, self).__init__()
self._name = 'Marker'
self.location = location
if icon is not None:
self.add_child(icon)
if isinstance(popup, text_type) or isinstance(popup, binary_type):
self.add_child(Popup(popup))
elif popup is not None:
self.add_child(popup)
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.marker(
[{{this.location[0]}},{{this.location[1]}}],
{
icon: new L.Icon.Default()
}
)
.addTo({{this._parent.get_name()}});
{% endmacro %}
""")
    def _get_self_bounds(self):
        """Computes the bounds of the object itself (not including its children)
in the form [[lat_min, lon_min], [lat_max, lon_max]]
"""
return [[self.location[0], self.location[1]],
[self.location[0], self.location[1]]]
class Popup(Element):
"""Create a Popup instance that can be linked to a Layer.
Parameters
----------
html: string or Element
Content of the Popup.
max_width: int, default 300
The maximal width of the popup.
"""
def __init__(self, html=None, max_width=300):
super(Popup, self).__init__()
self._name = 'Popup'
self.header = Element()
self.html = Element()
self.script = Element()
self.header._parent = self
self.html._parent = self
self.script._parent = self
if isinstance(html, Element):
self.html.add_child(html)
elif isinstance(html, text_type) or isinstance(html, binary_type):
self.html.add_child(Html(text_type(html)))
self.max_width = max_width
self._template = Template(u"""
var {{this.get_name()}} = L.popup({maxWidth: '{{this.max_width}}'});
{% for name, element in this.html._children.items() %}
var {{name}} = $('{{element.render(**kwargs).replace('\\n',' ')}}')[0];
{{this.get_name()}}.setContent({{name}});
{% endfor %}
{{this._parent.get_name()}}.bindPopup({{this.get_name()}});
{% for name, element in this.script._children.items() %}
{{element.render()}}
{% endfor %}
""") # noqa
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
for name, child in self._children.items():
child.render(**kwargs)
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.script.add_child(Element(
self._template.render(this=self, kwargs=kwargs)),
name=self.get_name())
class FitBounds(MacroElement):
"""Fit the map to contain a bounding box with the
maximum zoom level possible.
Parameters
----------
bounds: list of (latitude, longitude) points
Bounding box specified as two points [southwest, northeast]
padding_top_left: (x, y) point, default None
Padding in the top left corner. Useful if some elements in
the corner, such as controls, might obscure objects you're zooming
to.
padding_bottom_right: (x, y) point, default None
Padding in the bottom right corner.
padding: (x, y) point, default None
Equivalent to setting both top left and bottom right padding to
the same value.
max_zoom: int, default None
Maximum zoom to be used.
"""
def __init__(self, bounds, padding_top_left=None,
padding_bottom_right=None, padding=None, max_zoom=None):
super(FitBounds, self).__init__()
self._name = 'FitBounds'
self.bounds = json.loads(json.dumps(bounds))
options = {
'maxZoom': max_zoom,
'paddingTopLeft': padding_top_left,
'paddingBottomRight': padding_bottom_right,
'padding': padding,
}
self.fit_bounds_options = json.dumps({key: val for key, val in
options.items() if val},
sort_keys=True)
self._template = Template(u"""
{% macro script(this, kwargs) %}
{% if this.autobounds %}
var autobounds = L.featureGroup({{ this.features }}).getBounds()
{% endif %}
{{this._parent.get_name()}}.fitBounds(
{% if this.bounds %}{{ this.bounds }}{% else %}"autobounds"{% endif %},
{{ this.fit_bounds_options }}
);
{% endmacro %}
""") # noqa
|
mit
| -2,931,999,732,305,539,600
| 38.42973
| 114
| 0.561587
| false
| 3.954194
| false
| false
| false
|
Pica4x6/numina
|
numina/core/dataholders.py
|
1
|
3419
|
#
# Copyright 2008-2014 Universidad Complutense de Madrid
#
# This file is part of Numina
#
# Numina is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Numina is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Numina. If not, see <http://www.gnu.org/licenses/>.
#
"""
Recipe requirements
"""
import inspect
from .types import NullType, PlainPythonType
from .types import ListOfType
class EntryHolder(object):
def __init__(self, tipo, description, destination, optional,
default, choices=None, validation=True):
super(EntryHolder, self).__init__()
if tipo is None:
self.type = NullType()
elif tipo in [bool, str, int, float, complex, list]:
self.type = PlainPythonType(ref=tipo())
elif isinstance(tipo, ListOfType):
self.type = tipo
elif inspect.isclass(tipo):
self.type = tipo()
else:
self.type = tipo
self.description = description
self.optional = optional
self.dest = destination
self.default = default
self.choices = choices
self.validation = validation
def __get__(self, instance, owner):
"""Getter of the descriptor protocol."""
if instance is None:
return self
else:
if self.dest not in instance._numina_desc_val:
instance._numina_desc_val[self.dest] = self.default_value()
return instance._numina_desc_val[self.dest]
def __set__(self, instance, value):
"""Setter of the descriptor protocol."""
cval = self.convert(value)
if self.choices and (cval not in self.choices):
raise ValueError('{} not in {}'.format(cval, self.choices))
instance._numina_desc_val[self.dest] = cval
def convert(self, val):
return self.type.convert(val)
def validate(self, val):
if self.validation:
return self.type.validate(val)
return True
def default_value(self):
if self.default is not None:
return self.convert(self.default)
if self.type.default is not None:
return self.type.default
if self.optional:
return None
else:
fmt = 'Required {0!r} of type {1!r} is not defined'
msg = fmt.format(self.dest, self.type)
raise ValueError(msg)
class Product(EntryHolder):
'''Product holder for RecipeResult.'''
def __init__(self, ptype, description='', validation=True,
dest=None, optional=False, default=None, *args, **kwds):
super(Product, self).__init__(
ptype, description, dest, optional,
default, choices=None, validation=validation
)
# if not isinstance(self.type, DataProductType):
# raise TypeError('type must be of class DataProduct')
def __repr__(self):
return 'Product(type=%r, dest=%r)' % (self.type, self.dest)
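# Illustrative sketch (hypothetical, not part of Numina): the descriptors above
# store values in the owner instance's _numina_desc_val dict, which Numina's
# recipe machinery normally provides along with the 'dest' name. A stripped-down
# stand-in would look like:
#
#   class _ToyResult(object):
#       answer = Product(int, dest='answer', optional=True)
#       def __init__(self):
#           self._numina_desc_val = {}
#
#   r = _ToyResult()
#   r.answer = 42        # routed through Product.__set__ and type conversion
#   print(r.answer)      # read back through Product.__get__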
|
gpl-3.0
| -1,352,558,844,343,390,200
| 31.561905
| 75
| 0.622112
| false
| 4.094611
| false
| false
| false
|
abeing/droog
|
droog/message.py
|
1
|
2589
|
# Droog
# Copyright (C) 2015 Adam Miezianko
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Droog - Message
The message module defines the Message class which is a queue of messages to
display in the user interface.
"""
import Queue
import logging
import english
LOG = logging.getLogger(__name__)
class Messages(object):
"""The Messages class allows various components to add messages to be
displayed in the user interface. The user interface can then filter and
format the messages."""
def __init__(self, turn=None, history_size=100):
self._queue = Queue.Queue()
self.history = []
self._history_size = history_size
self._turn = turn
def add(self, message, clean=True):
"""Add a message to the message queue.
clean -- If true, process it for proper English form
"""
if clean:
message = english.make_sentence(message)
        if len(message) != 0:
LOG.info("Adding '%s' to the message queue.", message)
time = self._turn.current_time() if self._turn else ""
self._queue.put((message, time))
else:
LOG.warning("Zero-length message not added to the message queue.")
def empty(self):
"""True if the message queue is empty."""
return self._queue.empty()
def get(self):
"""Return the next message in the queue."""
message, time = self._queue.get()
if self._history_size > 0 and len(self.history) >= self._history_size:
self.history.pop(0)
self.history.append((message, time))
return message
def get_history(self, index, time=True):
"""Get an (optionally time-stamped) message from the history."""
if index > len(self.history):
return ""
text, time = self.history[index]
if time:
return "%s %s" % (time, text)
return text
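# Illustrative usage sketch (not part of Droog): exercising the queue without a
# turn object, so timestamps stay empty strings.
#
#   messages = Messages()
#   messages.add("you hear a noise")   # cleaned up by english.make_sentence
#   while not messages.empty():
#       print(messages.get())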
|
gpl-2.0
| -1,923,911,494,254,833,700
| 33.52
| 78
| 0.649672
| false
| 4.169082
| false
| false
| false
|
ConsumerAffairs/django-document-similarity
|
docsim/documents/migrations/0002_auto__add_cluster.py
|
1
|
2213
|
# -*- coding: utf-8 -*-
# flake8: noqa
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Cluster'
db.create_table('documents_cluster', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('parameters', self.gf('django.db.models.fields.TextField')(default={})),
))
db.send_create_signal('documents', ['Cluster'])
# Adding M2M table for field documents on 'Cluster'
db.create_table('documents_cluster_documents', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('cluster', models.ForeignKey(orm['documents.cluster'], null=False)),
('document', models.ForeignKey(orm['documents.document'], null=False))
))
db.create_unique('documents_cluster_documents', ['cluster_id', 'document_id'])
def backwards(self, orm):
# Deleting model 'Cluster'
db.delete_table('documents_cluster')
# Removing M2M table for field documents on 'Cluster'
db.delete_table('documents_cluster_documents')
models = {
'documents.cluster': {
'Meta': {'object_name': 'Cluster'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'documents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['documents.Document']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '{}'})
},
'documents.document': {
'Meta': {'object_name': 'Document'},
'id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['documents']
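# Usage note (hedged; assumes a standard South setup with the 'documents' app
# in INSTALLED_APPS):
#
#   ./manage.py schemamigration documents --auto   # how a file like this is generated
#   ./manage.py migrate documents                  # apply it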
|
agpl-3.0
| 4,228,240,815,063,617,500
| 41.557692
| 175
| 0.591505
| false
| 3.944742
| false
| false
| false
|
astrofrog/python-montage
|
montage/commands.py
|
1
|
80203
|
import subprocess
import status
import shlex
from commands_extra import *
def mAdd(images_table, template_header, out_image, img_dir=None,
no_area=False, type=None, exact=False, debug_level=None,
status_file=None, mpi=False, n_proc=8):
'''
Coadd the reprojected images in an input list to form an output mosaic
with FITS header keywords specified in a header file. Creates two output
files, one containing the coadded pixel values, and the other containing
coadded pixel area values. The pixel area values can be used as a
weighting function if the output pixel values are themselves to be coadded
with other projected images, and may also be used in validating the
fidelity of the output pixel values.
Required Arguments:
*images_table* [ value ]
ASCII table (generated by mImgtbl) containing metadata for all
images to be coadded.
*template_header* [ value ]
            FITS header template to be used in generation of the output FITS file.
*out_image* [ value ]
Name of output FITS image.
Optional Arguments:
*img_dir* [ value ]
Specifies path to directory containing reprojected images. If the
img_dir option is not included, mAdd will look for the input
images in the current working directory.
*no_area* [ True | False ]
Co-addition ignores weighting by pixel areas and performs
            coaddition based only on pixel positions. Will not output an area
image for the output image.
*type* [ value ]
Select type of averaging to be done on accumulated pixel values
(either mean or median). To generate a map showing counts of how
            many times each pixel was overlapped by the input images, use
count.
*exact* [ True | False ]
Enables exact size mode. The output image will match the header
template exactly, instead of shrinking the output to fit the data.
*debug_level* [ value ]
Turns on debugging to the specified level (1-3).
*status_file* [ value ]
mAdd output and errors will be written to status_file instead of
stdout.
*mpi* [ True | False ]
If set to True, will use the MPI-enabled versions of the Montage
executable.
*n_proc* [ value ]
If mpi is set to True, n_proc is the number of processes to run
simultaneously (default is 8)
'''
if mpi:
command = "mpirun -n %i mAddMPI" % n_proc
else:
command = "mAdd"
if img_dir:
command += " -p %s" % str(img_dir)
if no_area:
command += " -n"
if type:
command += " -a %s" % str(type)
if exact:
command += " -e"
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(images_table)
command += " " + str(template_header)
command += " " + str(out_image)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mAdd", p.stdout.read().strip())
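# Hedged usage sketch for mAdd (requires the Montage binaries on the PATH; all
# filenames below are illustrative):
#
#   mAdd("images.tbl", "template.hdr", "mosaic.fits", img_dir="projected",
#        exact=True)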
def mAddExec(images_table, template_header, tile_dir, out_image, img_dir=None,
no_area=False, type=None, exact=False, debug_level=None,
status_file=None, mpi=False, n_proc=8):
'''
Builds a series of outputs (which together make up a tiled output) through
multiple executions of the mAdd modules.
Required Arguments:
*images_table* [ value ]
ASCII table (generated by mImgtbl) containing metadata for all
images to be coadded.
*template_header* [ value ]
            FITS header template to be used in generation of the output FITS file.
*tile_dir* [ value ]
Directory to contain output tile images and header templates
*out_image* [ value ]
Prefix for output tile images
Optional Arguments:
*img_dir* [ value ]
Specifies path to directory containing reprojected images. If the
img_dir option is not included, mAdd will look for the input
images in the current working directory.
*no_area* [ True | False ]
Co-addition ignores weighting by pixel areas and performs
            coaddition based only on pixel positions. Will not output an area
image for the output image.
*type* [ value ]
Select type of averaging to be done on accumulated pixel values
(either mean or median).
*exact* [ True | False ]
Enables exact size mode. The output image will match the header
template exactly, instead of shrinking the output to fit the data.
*debug_level* [ value ]
Turns on debugging to the specified level (1-3).
*status_file* [ value ]
mAdd output and errors will be written to status_file instead of
stdout.
*mpi* [ True | False ]
If set to True, will use the MPI-enabled versions of the Montage
executable.
*n_proc* [ value ]
If mpi is set to True, n_proc is the number of processes to run
simultaneously (default is 8)
'''
if mpi:
command = "mpirun -n %i mAddExecMPI" % n_proc
else:
command = "mAddExec"
if img_dir:
command += " -p %s" % str(img_dir)
if no_area:
command += " -n"
if type:
command += " -a %s" % str(type)
if exact:
command += " -e"
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(images_table)
command += " " + str(template_header)
command += " " + str(tile_dir)
command += " " + str(out_image)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mAddExec", p.stdout.read().strip())
def mArchiveExec(region_table, debug_level=None):
'''
Given a table of archive images (generated by mArchiveList), calls
mArchiveGet on each one in sequence to retrieve all the files into the
current directory.
Required Arguments:
*region_table* [ value ]
Table of archive images, generated by mArchiveList.
Optional Arguments:
*debug_level* [ value ]
Prints out additional debugging information; in this version, the
only supported level is 1.
'''
command = "mArchiveExec"
if debug_level:
command += " -d %s" % str(debug_level)
command += " " + str(region_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mArchiveExec", p.stdout.read().strip())
def mArchiveGet(remote_ref, local_file, debug=False, raw=False):
'''
Retrieve a single FITS image from a remote archive, using a basic URL GET
but with a structured output.
Required Arguments:
*remote_ref* [ value ]
URL of remote FITS file to retrieve (should be in quotes). See
mArchiveList for more information.
*local_file* [ value ]
Full path/filename of the retrieved file.
Optional Arguments:
*debug* [ True | False ]
Print additional debugging information.
*raw* [ True | False ]
"Raw" mode - use a raw HTTP GET (no "HTTP/1.1" etc in the header);
necessary for communication with some servers.
'''
command = "mArchiveGet"
if debug:
command += " -d"
if raw:
command += " -r"
command += ' "' + str(remote_ref) + '"'
command += " " + str(local_file)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mArchiveGet", p.stdout.read().strip())
def mArchiveList(survey, band, object_or_location, width, height, out_file):
'''
Given a location on the sky, archive name, and size in degrees, contact
the IRSA server to retrieve a list of archive images. The list contains
enough information to support mArchiveGet downloads.
Required Arguments:
*survey* [ value ]
Can be one of: 2MASS DSS SDSS DPOSS
*band* [ value ]
Case insensitive - can be one of: (2MASS) j, h, k (SDSS) u, g, r,
i, z (DPOSS) f, j, n (DSS) DSS1, DSS1R, DSS1B, DSS2, DSS2B, DSS2R,
DSS2IR
*object_or_location* [ value ]
Object name or coordinate string to be resolved by NED (if string
includes spaces, must be surrounded by double quotes)
*width* [ value ]
Width of area of interest, in degrees
*height* [ value ]
Height of area of interest, in degrees
*out_file* [ value ]
Path to output table
'''
command = "mArchiveList"
command += " " + str(survey)
command += " " + str(band)
command += ' "' + str(object_or_location) + '"'
command += " " + str(width)
command += " " + str(height)
command += " " + str(out_file)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mArchiveList", p.stdout.read().strip())
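# Hedged usage sketch: fetch the 2MASS J-band images covering M31 into the
# current directory (requires the Montage binaries and network access; the
# table name is illustrative):
#
#   mArchiveList("2MASS", "j", "M31", 0.5, 0.5, "m31_raw.tbl")
#   mArchiveExec("m31_raw.tbl")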
def mBackground(in_image, out_image, A, B, C, debug_level=None, no_area=False,
status_file=None):
'''
Remove a background plane from a FITS image. The background correction
applied to the image is specified as Ax+By+C, where (x,y) is the pixel
coordinate using the image center as the origin, and (A,B,C) are the
background plane parameters specified as linear coefficients. To run in
'table' mode, see mBackground_tab.
Required Arguments:
*in_image* [ value ]
Input FITS file
*out_image* [ value ]
Output FITS file
*A, B, C* [ value ]
Corrections (as given by mFitplane or mFitExec)
Optional Arguments:
*debug_level* [ value ]
Turns on debugging to the specified level.
*no_area* [ True | False ]
Indicates that no area images are present (assumes equal weighting
for each data pixel)
*status_file* [ value ]
mBackground output and errors will be written to status_file
instead of stdout.
'''
command = "mBackground"
if debug_level:
command += " -d %s" % str(debug_level)
if no_area:
command += " -n"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(A)
command += " " + str(B)
command += " " + str(C)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mBackground", p.stdout.read().strip())
def mBackground_tab(in_image, out_image, images_table, corrections_table,
debug_level=None, no_area=False, status_file=None):
'''
Remove a background plane from a FITS image. The background correction
applied to the image is specified as Ax+By+C, where (x,y) is the pixel
coordinate using the image center as the origin, and (A,B,C) are the
background plane parameters specified as linear coefficients. This method
runs mBackground_tab in 'table' mode.
Required Arguments:
*in_image* [ value ]
Input FITS file
*out_image* [ value ]
Output FITS file
*images_table* [ value ]
Image metadata table to retrieve the filenames of images.
*corrections_table* [ value ]
Table of corrections (from mFitplane and mFitExec) to apply to the
corresponding image (from images_table).
Optional Arguments:
*debug_level* [ value ]
Turns on debugging to the specified level.
*no_area* [ True | False ]
Indicates that no area images are present (assumes equal weighting
for each data pixel)
*status_file* [ value ]
mBackground_tab output and errors will be written to status_file
instead of stdout.
'''
command = "mBackground_tab"
if debug_level:
command += " -d %s" % str(debug_level)
if no_area:
command += " -n"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(images_table)
command += " " + str(corrections_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mBackground_tab", p.stdout.read().strip())
def mBestImage(images_table, ra, dec, debug_level=None):
'''
Given a list of images and a position on the sky, determine which image
covers the location "best" (i.e., the one where the position is farthest
from the nearest edge).
Required Arguments:
*images_table* [ value ]
Input table of image metadata (as generated by mImgtbl).
*ra* [ value ]
RA of location of interest (in degrees)
*dec* [ value ]
Declination of location of interest (in degrees)
Optional Arguments:
*debug_level* [ value ]
Turn on debugging to the specified level (1 or 2)
'''
command = "mBestImage"
if debug_level:
command += " -d %s" % str(debug_level)
command += " " + str(images_table)
command += " " + str(ra)
command += " " + str(dec)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mBestImage", p.stdout.read().strip())
def mBgExec(images_table, corrections_table, corr_dir, proj_dir=None,
status_file=None, debug=False, no_area=False, mpi=False, n_proc=8):
'''
Runs mBackground on all the images in a metadata table, using the
corrections generated by mFitExec.
Required Arguments:
*images_table* [ value ]
Image metadata table generated by mImgtbl.
*corrections_table* [ value ]
Table of corrections generated by mFitExec
*corr_dir* [ value ]
Directory where output images should be written
Optional Arguments:
*proj_dir* [ value ]
Specifies the path to the directory containing the projected
images.
*status_file* [ value ]
Writes output message to status_file instead of to stdout
*debug* [ True | False ]
Turns on debugging
*no_area* [ True | False ]
Indicates that no area images are present (assumes equal weighting
for each pixel)
*mpi* [ True | False ]
If set to True, will use the MPI-enabled versions of the Montage
executable.
*n_proc* [ value ]
If mpi is set to True, n_proc is the number of processes to run
simultaneously (default is 8)
'''
if mpi:
command = "mpirun -n %i mBgExecMPI" % n_proc
else:
command = "mBgExec"
if proj_dir:
command += " -p %s" % str(proj_dir)
if status_file:
command += " -s %s" % str(status_file)
if debug:
command += " -d"
if no_area:
command += " -n"
command += " " + str(images_table)
command += " " + str(corrections_table)
command += " " + str(corr_dir)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mBgExec", p.stdout.read().strip())
def mBgModel(images_table, fits_table, corrections_table, n_iter=None,
level_only=False, debug_level=None, ref_img=None, status_file=None):
'''
mBgModel is a modelling/fitting program. It uses the image-to-image
difference parameter table created by mFitExec to interactively determine
a set of corrections to apply to each image in order to achieve a "best"
global fit.
Required Arguments:
*images_table* [ value ]
Image metadata table generated by mImgtbl.
*fits_table* [ value ]
Plane fitting table generated by mFitExec.
*corrections_table* [ value ]
Output table of background corrections
Optional Arguments:
*n_iter* [ value ]
Number of iterations (without option, defaults to 5000). Can be
between 1 and 32767.
*level_only* [ True | False ]
Calculate level adjustments only (ie, don't attempt to match the
slopes)
*debug_level* [ value ]
Turns on debugging to the specified level (1-3).
*ref_img* [ value ]
Turns on additional debugging for the nth image in images_table.
*status_file* [ value ]
mBgModel output and errors are written to status_file instead of
to stdout.
'''
command = "mBgModel"
if n_iter:
command += " -i %s" % str(n_iter)
if level_only:
command += " -l"
if debug_level:
command += " -d %s" % str(debug_level)
if ref_img:
command += " -r %s" % str(ref_img)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(images_table)
command += " " + str(fits_table)
command += " " + str(corrections_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mBgModel", p.stdout.read().strip())
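# Hedged sketch of the background-rectification flow these wrappers support.
# mOverlaps and mFitExec, referenced in the docstrings above, produce the input
# tables; all filenames are illustrative:
#
#   mBgModel("images.tbl", "fits.tbl", "corrections.tbl", level_only=True)
#   mBgExec("images.tbl", "corrections.tbl", "corrected", proj_dir="projected")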
def mCatMap(in_table, out_image, template_header, column=None, ref_mag=None,
debug_level=None, size=None):
'''
mCatMap is a point-source imaging program. The user defines a general
output FITS image, and its pixels are populated from a table of point
sources. The source fluxes (or just source counts) from the table are
added into the appropriate pixel to create an output image.
Required Arguments:
*in_table* [ value ]
Input table of source metadata.
*out_image* [ value ]
Path of output FITS file.
*template_header* [ value ]
ASCII header template defining output FITS file.
Optional Arguments:
*column* [ value ]
Name of the table column that contains flux levels. If not
specified, pixels will be populated with source counts rather than
summed flux values.
*ref_mag* [ value ]
Set a reference magnitude to use when calculating fluxes.
*debug_level* [ value ]
Turn on debugging to the specified level (1-3)
*size* [ value ]
Set a spread size for point sources (default is to use no spread).
Allowed values are 3 or 5.
'''
command = "mCatMap"
if column:
command += " -c %s" % str(column)
if ref_mag:
command += " -m %s" % str(ref_mag)
if debug_level:
command += " -d %s" % str(debug_level)
if size:
command += " -w %s" % str(size)
command += " " + str(in_table)
command += " " + str(out_image)
command += " " + str(template_header)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mCatMap", p.stdout.read().strip())
def mConvert(in_image, out_image, debug_level=None, status_file=None,
bitpix=None, min_val=None, max_val=None, blank_value=None):
'''
mConvert changes the datatype of an image. When converting to floating
point, no additional information is needed. However, when converting from
higher precision (e.g. 64-bit floating point) to lower (e.g. 16-bit
integer), scaling information is necessary. This can be given explicitly
by the user or guessed by the program.
Required Arguments:
*in_image* [ value ]
Input image filename
*out_image* [ value ]
Output image filename.
Optional Arguments:
*debug_level* [ value ]
Turns on debugging to the specified level (1-3).
*status_file* [ value ]
mBgModel output and errors are written to status_file instead of
to stdout.
*bitpix* [ value ]
            BITPIX value for the output FITS file (default is -64). Possible
values are: 8 (character or unsigned binary integer), 16 (16-bit
integer), 32 (32-bit integer), -32 (single precision floating
point), -64 (double precision floating point).
*min_val* [ value ]
Pixel data value in the input image which should be treated as a
minimum (value of 0) in the output image when converting from
floating point to integer (default for BITPIX 8: 0; BITPIX 16:
            -32767; BITPIX 32: -2147483647)
*max_val* [ value ]
Pixel data value in the input image which should be treated as a
maximum (value of 255 or 32768) in the output image when
converting from floating point to integer (Default for BITPIX 8:
255; BITPIX 16: 32768; BITPIX 32: 2147483648)
*blank_value* [ value ]
If converting down to an integer scale: value to be used in the
output image to represent blank pixels (NaN) from the input image.
Default value is min_val.
'''
command = "mConvert"
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
if bitpix:
command += " -b %s" % str(bitpix)
if min_val:
command += " -min %s" % str(min_val)
if max_val:
command += " -max %s" % str(max_val)
if blank_value:
command += " -blank %s" % str(blank_value)
command += " " + str(in_image)
command += " " + str(out_image)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mConvert", p.stdout.read().strip())
def mDiff(in_image_1, in_image_2, out_image, template_header,
debug_level=None, no_area=False, status_file=None):
'''
mDiff calculates a simple difference between a single pair of overlapping
images. This is meant for use on reprojected images where the pixels
already line up exactly. The list of overlapping pairs is normally produced
by mOverlaps, which compares each image in an image metadata table with
every other image to determine all overlapping image pairs. A pair of
images is deemed to overlap if any pixel around the perimeter of one image
falls within the boundary of the other image.
Required Arguments:
*in_image_1* [ value ]
First input FITS file (Also needs area image in1_area_image, or
use the no_area option)
*in_image_2* [ value ]
Second input FITS file. (Also needs area image in2_area_image, or
use the no_area option)
*out_image* [ value ]
Difference FITS image to be generated.
*template_header* [ value ]
FITS header template used to generate output image.
Optional Arguments:
*debug_level* [ value ]
Turns on debugging to the specified level (1-4).
*no_area* [ True | False ]
No-area-images option. Creates difference image without requiring
pixel area FITS image
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
'''
command = "mDiff"
if debug_level:
command += " -d %s" % str(debug_level)
if no_area:
command += " -n"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image_1)
command += " " + str(in_image_2)
command += " " + str(out_image)
command += " " + str(template_header)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mDiff", p.stdout.read().strip())
def mDiffExec(diffs_table, template_header, diff_dir, proj_dir=None,
debug=False, no_area=False, status_file=None, mpi=False, n_proc=8):
'''
Runs mDiff on all the pairs identified by mOverlaps.
Required Arguments:
*diffs_table* [ value ]
Table generated by mOverlaps for the images in proj_dir.
*template_header* [ value ]
FITS header template for output files.
*diff_dir* [ value ]
Path to output files.
Optional Arguments:
*proj_dir* [ value ]
Specifies path to the directory containing reprojected input
images.
*debug* [ True | False ]
Turns on debugging.
*no_area* [ True | False ]
No-area-images option. Creates difference image without requiring
_area FITS images
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
*mpi* [ True | False ]
If set to True, will use the MPI-enabled versions of the Montage
executable.
*n_proc* [ value ]
If mpi is set to True, n_proc is the number of processes to run
simultaneously (default is 8)
'''
if mpi:
command = "mpirun -n %i mDiffExecMPI" % n_proc
else:
command = "mDiffExec"
if proj_dir:
command += " -p %s" % str(proj_dir)
if debug:
command += " -d"
if no_area:
command += " -n"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(diffs_table)
command += " " + str(template_header)
command += " " + str(diff_dir)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mDiffExec", p.stdout.read().strip())
def mDiffFitExec(diffs_table, fits_table, diff_dir, debug=False,
status_file=None):
'''
Using the table of overlaps found by mOverlaps, mDiffFitExec runs both
mDiff and mFitplane for each record. The fitting parameters are written
to a file to be used by mBgModel.
Required Arguments:
*diffs_table* [ value ]
Overlap table generated by mOverlaps, the last column of which
contains the filenames of the difference images generated by
mDiffExec.
*fits_table* [ value ]
Output table of difference parameters.
*diff_dir* [ value ]
Directory containing difference images.
Optional Arguments:
*debug* [ True | False ]
Turns on debugging
*status_file* [ value ]
Writes output message to status_file instead of to stdout
'''
command = "mDiffFitExec"
if debug:
command += " -d"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(diffs_table)
command += " " + str(fits_table)
command += " " + str(diff_dir)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mDiffFitExec", p.stdout.read().strip())
def mExec(survey, band, raw_dir=None, n_tile_x=None, n_tile_y=None,
level_only=False, keep=False, corners=False, output_image=None,
debug_level=None, region_header=None, header=None,
workspace_dir=None):
'''
The mExec module is a mosaicking executive for 2MASS, SDSS, and DSS data.
It includes remote data and metadata access. Alternatively, users can
mosaic a set of data already on disk.
Required Arguments:
*survey, band* [ value ]
If not mosaicking user-provided data (raw_dir option), must select
one of the following combinations of survey and band: 2MASS [j, h,
k]; SDSS [u, g, r, i, z]; DSS [DSS1, DSS1R, DSS1B, DSS2, DSS2B,
DSS2R, DSS2IR]
Optional Arguments:
*raw_dir* [ value ]
Provide path to directory containing original ("raw") data which
will be reprojected and mosaicked. Not necessary if using mExec to
retrieve remote data from the 2MASS, SDSS or DSS surveys.
*n_tile_x* [ value ]
Number of output tiles to create along the X-axis - default is 1
for a single mosaicked image.
*n_tile_y* [ value ]
Number of output tiles to create along the Y-axis - default is
equal to n_tile_x.
*level_only* [ True | False ]
"Level-only" option (see mBgModel)
*keep* [ True | False ]
If retrieving data from a remote archive, the "keep" option will
leave the original data products on disk after generating a
mosaic. Without this option, raw data will be deleted (unless it
was provided by the user with the "-r" option).
*corners* [ True | False ]
Remove all temporary files and intermediate data products. Note:
if not using the '-o' option to specify an output file, this will
also remove mosaic_image.
*output_image* [ value ]
Provide your own filename for the output mosaic. Default filename
is "mosaic_image."
*debug_level* [ value ]
Print out additional debugging information (levels 1-4)
*region_header* [ value ]
Path to header template used to create mosaic.
*header* [ value ]
Provide header template as text input rather than point to a file;
see sample shell script that makes use of this option.
*workspace_dir* [ value ]
Directory where intermediate files will be created. If no
workspace is given, a unique local subdirectory will be created
(e.g. ./MOSAIC_AAAaa17v)
'''
command = "mExec"
if raw_dir:
command += " -r %s" % str(raw_dir)
if n_tile_x:
command += " -n %s" % str(n_tile_x)
if n_tile_y:
command += " -m %s" % str(n_tile_y)
if level_only:
command += " -l"
if keep:
command += " -k"
if corners:
command += " -c"
if output_image:
command += " -o %s" % str(output_image)
if debug_level:
command += " -d %s" % str(debug_level)
if region_header:
command += " -f %s" % str(region_header)
if header:
command += " -h %s" % str(header)
command += " " + str(survey)
command += " " + str(band)
if workspace_dir:
command += " %s" % str(workspace_dir)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mExec", p.stdout.read().strip())
def mFitExec(diffs_table, fits_table, diff_dir, debug=False, status_file=None,
mpi=False, n_proc=8):
'''
Runs mFitplane on all the difference images identified by mOverlaps and
generated by mDiff or mDiffExec. mFitExec creates a table of image-to-
image difference parameters.
Required Arguments:
*diffs_table* [ value ]
Overlap table generated by mOverlaps, the last column of which
contains the filenames of the difference images generated by
mDiffExec.
*fits_table* [ value ]
Output table of difference parameters.
*diff_dir* [ value ]
Directory containing difference images.
Optional Arguments:
*debug* [ True | False ]
Turns on debugging
*status_file* [ value ]
Writes output message to status_file instead of to stdout
*mpi* [ True | False ]
If set to True, will use the MPI-enabled versions of the Montage
executable.
*n_proc* [ value ]
If mpi is set to True, n_proc is the number of processes to run
simultaneously (default is 8)
'''
if mpi:
command = "mpirun -n %i mFitExecMPI" % n_proc
else:
command = "mFitExec"
if debug:
command += " -d"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(diffs_table)
command += " " + str(fits_table)
command += " " + str(diff_dir)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mFitExec", p.stdout.read().strip())
def mFitplane(in_image, border=None, debug_level=None, status_file=None):
'''
Uses least squares to fit a plane (excluding outlier pixels) to an image.
It is used on the difference images generated using mDiff or mDiffExec.
Required Arguments:
*in_image* [ value ]
Input FITS file is a difference file between two other FITS files,
as can be generated using mDiff.
Optional Arguments:
*border* [ value ]
Number of border pixels to ignore at edges of image.
*debug_level* [ value ]
Turns on debugging to the specified level (1-3).
*status_file* [ value ]
Output and errors are written to status_file instead of stdout.
'''
command = "mFitplane"
if border:
command += " -b %s" % str(border)
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mFitplane", p.stdout.read().strip())
def mFixNan(in_image, out_image, debug_level=None, nan_value=None,
min_blank=None, max_blank=None):
'''
Converts NaNs found in the image to some other value (given by the user),
or converts a range of supplied values into NaNs.
Required Arguments:
*in_image* [ value ]
Input FITS image file
*out_image* [ value ]
Path of output FITS file. To run in "count" mode without creating
an output file, use a dash ("-") for this argument.
Optional Arguments:
*debug_level* [ value ]
Turn on debugging to the specified level (1-3)
*nan_value* [ value ]
Value to use in place of any NaNs
*min_blank, max_blank* [ value ]
If the nan_value option is not used, mFixNaN will replace all
pixel values between min_blank and max_blank with NaN.
'''
command = "mFixNan"
if debug_level:
command += " -d %s" % str(debug_level)
if nan_value:
command += " -v %s" % str(nan_value)
command += " " + str(in_image)
command += " " + str(out_image)
if min_blank and max_blank:
command += " %s" % str(min_blank)
command += " %s" % str(max_blank)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mFixNan", p.stdout.read().strip())
def mFlattenExec(images_table, flat_dir, img_dir=None, debug=False,
no_area=False, status_file=None):
'''
Runs both mFitPlane and mBackground on a set of images.
Required Arguments:
*images_table* [ value ]
Metadata table (generated by mImgtbl) describing images to be
flattened.
*flat_dir* [ value ]
Path to directory where output files should be created.
Optional Arguments:
*img_dir* [ value ]
Specifies path to directory containing images to be flattened.
*debug* [ True | False ]
Turns on debugging.
*no_area* [ True | False ]
No-area-images option, indicating that mFlattenExec should not
require area images for all the input FITS images.
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
'''
command = "mFlattenExec"
if img_dir:
command += " -p %s" % str(img_dir)
if debug:
command += " -d"
if no_area:
command += " -n"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(images_table)
command += " " + str(flat_dir)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mFlattenExec", p.stdout.read().strip())
def mGetHdr(in_image, img_header, debug=False, hdu=None, status_file=None):
'''
Reads in the header from a FITS image and prints it out to a text file.
Required Arguments:
*in_image* [ value ]
Path to FITS image from which to retrieve the header.
*img_header* [ value ]
Path to text file where FITS header should be written.
Optional Arguments:
*debug* [ True | False ]
Turns on debugging.
*hdu* [ value ]
Retrieve the header from the FITS extension given by hdu. "0"
indicates the primary FITS extension, and is the default used by
mGetHdr.
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
'''
command = "mGetHdr"
if debug:
command += " -d"
if hdu:
command += " -h %s" % str(hdu)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
command += " " + str(img_header)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mGetHdr", p.stdout.read().strip())
def mHdr(object_or_location, width, out_file, system=None, equinox=None,
height=None, pix_size=None, rotation=None):
'''
Connects to the IRSA service HdrTemplate to create a header template based
on a location, size, resolution and rotation.
Required Arguments:
*object_or_location* [ value ]
Object string or coordinate location
*width* [ value ]
Width (x-axis) of area
*out_file* [ value ]
Path to output header template
Optional Arguments:
*system* [ value ]
Specify a coordinate system. Can be one of: "equatorial" or "eq"
(default), "ecliptic" or "ec" "galactic", "ga", "supergalactic" or
"sgal"
*equinox* [ value ]
Specify an equinox. Default is 2000.0
*height* [ value ]
Height (y-axis) of area in degrees. Default is equal to width
*pix_size* [ value ]
Size of a pixel (in arcsec); default is 1
*rotation* [ value ]
Rotation of image; default is 0
'''
command = "mHdr"
if system:
command += " -s %s" % str(system)
if equinox:
command += " -e %s" % str(equinox)
if height:
command += " -h %s" % str(height)
if pix_size:
command += " -p %s" % str(pix_size)
if rotation:
command += " -r %s" % str(rotation)
command += ' "' + str(object_or_location) + '"'
command += " " + str(width)
command += " " + str(out_file)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mHdr", p.stdout.read().strip())
def mHdrCheck(in_image, status_file=None):
'''
mHdrCheck reads in the header from a FITS image (or an ASCII header
template file) and checks to see if any header lines are invalid. If it
finds one, it will print out a message stating which keyword is invalid
and exit before checking the rest of the header. It will not report on
multiple invalid values. If all values are correct, mHdrCheck will print
out a "Valid FITS/WCS" message.
Required Arguments:
*in_image* [ value ]
Path of FITS file to be validated.
Optional Arguments:
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
'''
command = "mHdrCheck"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mHdrCheck", p.stdout.read().strip())
def mHdrtbl(directory, images_table, recursive=False, corners=False,
debug=False, output_invalid=False, status_file=None, img_list=None):
'''
mHdrtbl operates in a fashion similar to mImgtbl, but is used on a set of
header template files instead of FITS images.
Required Arguments:
*directory* [ value ]
Path to directory containing set of input header templates.
*images_table* [ value ]
Path of output metadata table.
Optional Arguments:
*recursive* [ True | False ]
mHdrtbl can also be used as a standalone program to gather image
metadata for other purposes (to populate a database, as a basis
for spatial coverage searches, etc.) In this case it is often
desirable to collect information on all the files in a directory
tree recursively. The recursive option instructs mHdrtbl to search
the given directory and all its subdirectories recursively.
*corners* [ True | False ]
The corners option in mHdrtbl will cause eight extra columns to be
added to the output metadata table containing the RA, Dec
coordinates (ra1, dec1, ... ra4, dec4) of the image corners. The
output is always Equatorial J2000, even if the input is some other
system. This has been done to make the metadata uniform so that it
can easily be used for coverage searches, etc. The corners option
is not needed for normal Montage processing.
*debug* [ True | False ]
Turn on debugging
*output_invalid* [ True | False ]
When this option is set, mHdrtbl will explicitly output each
header file it finds that does not appear to be valid, along with
information on the error.
*status_file* [ value ]
Output and errors are written to status_file instead of being
written to stdout.
*img_list* [ value ]
mHdrtbl will only process files with names specified in table
img_list, ignoring any other files in the directory.
'''
command = "mHdrtbl"
if recursive:
command += " -r"
if corners:
command += " -c"
if debug:
command += " -d"
if output_invalid:
command += " -b"
if status_file:
command += " -s %s" % str(status_file)
if img_list:
command += " -t %s" % str(img_list)
command += " " + str(directory)
command += " " + str(images_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mHdrtbl", p.stdout.read().strip())
def mImgtbl(directory, images_table, recursive=False, corners=False,
include_area=False, debug=False, output_invalid=False,
status_file=None, fieldlist=None, img_list=None):
'''
mImgtbl extracts the FITS header geometry information from a set of files
and creates an ASCII image metadata table which is used by several of the
other programs. It only collects data from headers that comply with the
FITS standard, but reports a count of images that fail that check.
Required Arguments:
*directory* [ value ]
Path to directory containing set of input FITS files.
*images_table* [ value ]
Path of output metadata table.
Optional Arguments:
*recursive* [ True | False ]
mImgtbl can also be used as a standalone program to gather image
metadata for other purposes (to populate a database, as a basis
for spatial coverage searches, etc.) In this case it is often
desirable to collect information on all the files in a directory
tree recursively. The recursive option instructs mImgtbl to search
the given directory and all its subdirectories recursively.
*corners* [ True | False ]
The corners option in mImgtbl will cause eight extra columns to be
added to the output metadata table containing the RA, Dec
coordinates (ra1, dec1, ... ra4, dec4) of the image corners. The
output is always Equatorial J2000, even if the input is some other
system. Though not required for the core processing modules, we
recommend using this option, as some of the utilities may require
corner locations to function properly.
*include_area* [ True | False ]
By default, mImgtbl ignores FITS files with names ending in _area
(i.e. name_area_image), assuming them to be Montage-created area
images. If you want to generate information on these images, or if
you have images with _area in the title other than those generated
by Montage, you should turn on this option to force mImgtbl to
look at all images in the directory.
*debug* [ True | False ]
Turn on debugging
*output_invalid* [ True | False ]
When this option is set, mImgtbl will explicitly output each FITS
file it finds that does not appear to be valid, along with
information on the error.
*status_file* [ value ]
Output and errors are written to status_file instead of being
written to stdout.
*fieldlist* [ value ]
Used to specify a fieldlist, which will list additional keywords
to be read from the FITS headers and included in the output table.
Fieldlists should specify the keyword name, type
(int,char,double), and size.
*img_list* [ value ]
mImgtbl will only process files with names specified in table
img_list, ignoring any other files in the directory.
'''
command = "mImgtbl"
if recursive:
command += " -r"
if corners:
command += " -c"
if include_area:
command += " -a"
if debug:
command += " -d"
if output_invalid:
command += " -b"
if status_file:
command += " -s %s" % str(status_file)
if fieldlist:
command += " -f %s" % str(fieldlist)
if img_list:
command += " -t %s" % str(img_list)
command += " " + str(directory)
command += " " + str(images_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mImgtbl", p.stdout.read().strip())
def mMakeHdr(images_table, template_header, debug_level=None,
status_file=None, cdelt=None, north_aligned=False, system=None,
equinox=None):
'''
From a list of images to be mosaicked together, mMakeHdr generates the
FITS header that best describes the output image.
Required Arguments:
*images_table* [ value ]
Metadata table (generated by mImgtbl) describing the images to be
mosaicked.
*template_header* [ value ]
Path to header template to be generated.
Optional Arguments:
*debug_level* [ value ]
Turns on debugging to the specified level (1-3).
*status_file* [ value ]
Output and errors are written to status_file instead of to stdout.
*cdelt* [ value ]
Specify a pixel scale for the header, if different from the input
images
*north_aligned* [ True | False ]
"North-aligned" option. By default, the FITS header generated
represents the best fit to the images, often resulting in a slight
rotation. If you want north to be straight up in your final
mosaic, you should use this option.
*system* [ value ]
Specifies the system for the header (default is Equatorial).
Possible values are: EQUJ EQUB ECLJ ECLB GAL SGAL
*equinox* [ value ]
If a coordinate system is specified, the equinox can also be given
in the form YYYY. Default is J2000.
'''
command = "mMakeHdr"
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
if cdelt:
command += " -p %s" % str(cdelt)
if north_aligned:
command += " -n"
command += " " + str(images_table)
command += " " + str(template_header)
if system:
command += " %s" % str(system)
if equinox:
command += " %s" % str(equinox)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mMakeHdr", p.stdout.read().strip())
def mOverlaps(images_table, diffs_table, exact=False, debug_level=None,
status_file=None):
'''
Analyze an image metadata table to determine a list of overlapping images.
Each image is compared with every other image to determine all overlapping
image pairs. A pair of images are deemed to overlap if any pixel around
the perimeter of one image falls within the boundary of the other image.
Required Arguments:
*images_table* [ value ]
Table of image metadata generated by mImgtbl.
*diffs_table* [ value ]
Path of output table to be generated containing overlap
information.
Optional Arguments:
*exact* [ True | False ]
Enables 'exact' overlaps mode, as opposed to the default
approximate algorithm. The default mode uses great-circle
connecting lines between image corners to determine which images
overlap. Exact mode will instead check the edge pixels of every
image to determine which pixels are inside the others. Although
the default mode will occasionally report some incorrect overlaps,
this is not a concern since mDiff will detect and ignore these
false positive results when processing the table.
*debug_level* [ value ]
Turns on debugging to the specified level (1 or 2)
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
'''
command = "mOverlaps"
if exact:
command += " -e"
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(images_table)
command += " " + str(diffs_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mOverlaps", p.stdout.read().strip())
def mPix2Coord(template_header, ixpix, jypix, debug=False):
'''
Takes an image FITS header template and a pixel (x,y) coordinate, and
outputs the corresponding sky location.
Required Arguments:
*template_header* [ value ]
ASCII header template describing the image (either a FITS image,
or a JPEG file created from the FITS file)
*ixpix* [ value ]
X coordinate (pixel location) on image
*jypix* [ value ]
Y coordinate (pixel location) on image
Optional Arguments:
*debug* [ True | False ]
Print out additional debugging information
'''
command = "mPix2Coord"
if debug:
command += " -d"
command += " " + str(template_header)
command += " " + str(ixpix)
command += " " + str(jypix)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mPix2Coord", p.stdout.read().strip())
def mProject(in_image, out_image, template_header, factor=None,
debug_level=None, status_file=None, hdu=None, scale=None,
weight_file=None, threshold=None, whole=False):
'''
mProject reprojects a single image to the scale defined in a FITS header
template file (read more about header templates here). The program
produces a pair of images: the reprojected image and an "area" image
consisting of the fraction of input pixel sky area that went into each output
pixel. The "drizzle" algorithm is implemented. The algorithm proceeds by
mapping pixel corners (as adjusted by drizzle, if called) from the input
pixel space to the output pixel space, calculating overlap area with each
output pixel, and accumulating an appropriate fraction of the input flux
into the output image pixels. In addition, the appropriate fraction of
the input pixel area is accumulated into the area image pixels.
Projection of points from input pixel space to output pixel space is
calculated in two steps: first map from input pixel space to sky
coordinates; second map from sky coordinates to output pixel space.
Required Arguments:
*in_image* [ value ]
Input FITS file to be reprojected.
*out_image* [ value ]
Path of output FITS file to be created.
*template_header* [ value ]
FITS header template to be used in generation of output image
Optional Arguments:
*factor* [ value ]
Processing is done utilizing the drizzle algorithm. factor is a
floating point number; recommended drizzle factors are from 0.5 to
1.
*debug_level* [ value ]
Causes additional debugging information to be printed to stdout.
Valid levels are 1-5 (for higher debugging levels, it is
recommended to redirect the output to a file).
*status_file* [ value ]
Output and errors are written to status_file instead of being
written to stdout.
*hdu* [ value ]
Use the specified FITS extension (default is to use the first HDU
with image data)
*scale* [ value ]
Apply a correction factor of scale to each pixel
*weight_file* [ value ]
Path to a weight map to be used when reading values from the input
image.
*threshold* [ value ]
Pixels with weights below threshold will be treated as blank.
*whole* [ True | False ]
Makes the output region (originally defined in the header
template) big enough to include all of the input images
'''
command = "mProject"
if factor:
command += " -z %s" % str(factor)
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
if hdu:
command += " -h %s" % str(hdu)
if scale:
command += " -x %s" % str(scale)
if weight_file:
command += " -w %s" % str(weight_file)
if threshold:
command += " -t %s" % str(threshold)
if whole:
command += " -X"
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(template_header)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mProject", p.stdout.read().strip())
def mProjectPP(in_image, out_image, template_header, factor=None,
debug_level=None, border=None, status_file=None,
alternate_header=None, hdu=None, scale=None, weight_file=None,
threshold=None, whole=False):
'''
mProjectPP reprojects a single image to the scale defined in an alternate
FITS header template generated (usually) by mTANhdr. The program produces
a pair of images: the reprojected image and an "area" image consisting of
the fraction of input pixel sky area that went into each output pixel. This
area image goes through all the subsequent processing that the reprojected
image does, allowing it to be properly coadded at the end.
Required Arguments:
*in_image* [ value ]
Input FITS file to be reprojected.
*out_image* [ value ]
Path to output FITS file to be created.
*template_header* [ value ]
FITS header template to be used in generation of output FITS
Optional Arguments:
*factor* [ value ]
Processing is done utilizing the drizzle algorithm. factor is a
floating point number; recommended drizzle factors are from 0.5 to
1.
*debug_level* [ value ]
Causes additional debugging information to be printed to stdout.
Valid levels are 1-5; for levels greater than 1, it's recommended
to redirect the output into a text file.
*border* [ value ]
Ignores border pixels around the image edge when performing
calculations.
*status_file* [ value ]
Output and errors are written to status_file instead of being
written to stdout.
*alternate_header* [ value ]
Specifies an alternate FITS header for use in mProjectPP
calculations; allows substitution of pseudo-TAN headers created by
mTANHdr.
*hdu* [ value ]
Specify the FITS extension to re-project if the FITS image is
multi-extension.
*scale* [ value ]
Multiply the pixel values by scale when reprojecting. For
instance, each 2MASS image has a different scale factor (very near
1.0) to correct for varying magnitude-zero points.
*weight_file* [ value ]
Path to a weight map to be used when reading values from the input
image.
*threshold* [ value ]
If using a weight image; only use those pixels where the weight
value is above threshold.
*whole* [ True | False ]
Reproject the whole image even if part of it is outside the region
of interest (don't crop while re-projecting).
'''
command = "mProjectPP"
if factor:
command += " -z %s" % str(factor)
if debug_level:
command += " -d %s" % str(debug_level)
if border:
command += " -b %s" % str(border)
if status_file:
command += " -s %s" % str(status_file)
if alternate_header:
# Note: the flag is emitted literally as "-[i|o]" here; the mProjectPP
# binary itself expects either -i (input) or -o (output) for an
# alternate header.
command += " -[i|o] %s" % str(alternate_header)
if hdu:
command += " -h %s" % str(hdu)
if scale:
command += " -x %s" % str(scale)
if weight_file:
command += " -w %s" % str(weight_file)
if threshold:
command += " -t %s" % str(threshold)
if whole:
command += " -X"
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(template_header)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mProjectPP", p.stdout.read().strip())
def mProjExec(images_table, template_header, proj_dir, stats_table,
raw_dir=None, debug=False, exact=False, whole=False, border=None,
restart_rec=None, status_file=None, scale_column=None, mpi=False,
n_proc=8):
'''
An executive which runs mProject (or, if possible for the input/output
projections, mProjectPP) for each image in an image metadata table.
Required Arguments:
*images_table* [ value ]
ASCII table (generated by mImgtbl) containing metadata for all
images to be reprojected.
*template_header* [ value ]
FITS header template to be used in generation of output FITS.
*proj_dir* [ value ]
Directory in which to create reprojected images.
*stats_table* [ value ]
Name of table for output statistics (time of each reprojection, or
error messages).
Optional Arguments:
*raw_dir* [ value ]
Specifies the path to the directory containing the images to be
reprojected. If the -p option is not included, mProjExec looks for
the images in the current working directory.
*debug* [ True | False ]
Turns on debugging
*exact* [ True | False ]
Flag indicating output image should exactly match the FITS header
template, and not crop off blank pixels
*whole* [ True | False ]
Force reprojection of whole images, even if they exceed the area
of the FITS header template
*border* [ value ]
Ignore border width of pixels around edge of images
*restart_rec* [ value ]
Allows restart at record number restart_rec, if mProjExec exits
upon an error
*status_file* [ value ]
Output and errors are written to status_file instead of being
written to stdout.
*scale_column* [ value ]
Turn on flux rescaling (e.g. magnitude zero point correction):
scale_column is the name of a column in images_table which
contains scale information.
*mpi* [ True | False ]
If set to True, will use the MPI-enabled versions of the Montage
executable.
*n_proc* [ value ]
If mpi is set to True, n_proc is the number of processes to run
simultaneously (default is 8)
'''
if mpi:
command = "mpirun -n %i mProjExecMPI" % n_proc
else:
command = "mProjExec"
if raw_dir:
command += " -p %s" % str(raw_dir)
if debug:
command += " -d"
if exact:
command += " -e"
if whole:
command += " -X"
if border:
command += " -b %s" % str(border)
if restart_rec:
command += " -r %s" % str(restart_rec)
if status_file:
command += " -s %s" % str(status_file)
if scale_column:
command += " -x %s" % str(scale_column)
command += " " + str(images_table)
command += " " + str(template_header)
command += " " + str(proj_dir)
command += " " + str(stats_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mProjExec", p.stdout.read().strip())
def mPutHdr(in_image, out_image, template_header, debug=False,
status_file=None, hdu=None):
'''
Replaces the header of the input file with one supplied by the user.
Required Arguments:
*in_image* [ value ]
Input FITS file.
*out_image* [ value ]
Path to output FITS file (with new header)
*template_header* [ value ]
ASCII header template to write into out_image.
Optional Arguments:
*debug* [ True | False ]
Turns on debugging to the specified level (this version only
supports level "1").
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
*hdu* [ value ]
Write to the specified FITS extension (HDU).
'''
command = "mPutHdr"
if debug:
command += " -d"
if status_file:
command += " -s %s" % str(status_file)
if hdu:
command += " -h %s" % str(hdu)
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(template_header)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mPutHdr", p.stdout.read().strip())
def mRotate(in_image, out_image, debug_level=None, status_file=None,
rotation_angle=None, ra=None, dec=None, xsize=None, ysize=None):
'''
Rotates a FITS image by an arbitrary angle. This module is meant for
quick-look only; it is not flux conserving.
Required Arguments:
*in_image* [ value ]
Input FITS image.
*out_image* [ value ]
Path to output (rotated) FITS image.
Optional Arguments:
*debug_level* [ value ]
Print out additional debugging information (level can be 1-3)
*status_file* [ value ]
Output and errors are written to status_file instead of stdout.
*rotation_angle* [ value ]
Provide an angle (in degrees) to rotate the image.
*ra, dec, xsize* [ value ]
Center location and width (in degrees) of output image - optional.
By default, entire input image area will be included in output
image.
*ysize* [ value ]
Height (in degrees) of output image, if a new center location and
width are provided. Only used if ra, dec, and xsize are specified.
Defaults to xsize.
'''
command = "mRotate"
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
if rotation_angle:
command += " -r %s" % str(rotation_angle)
command += " " + str(in_image)
command += " " + str(out_image)
if ra and dec and xsize:
command += " %s" % str(ra)
command += " %s" % str(dec)
command += " %s" % str(xsize)
if ysize:
command += " %s" % str(ysize)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mRotate", p.stdout.read().strip())
def mShrink(in_image, out_image, factor, fixed_size=False, debug_level=None,
status_file=None):
'''
A utility for reducing the size of a FITS file, by averaging blocks of
pixels.
Required Arguments:
*in_image* [ value ]
Input FITS file
*out_image* [ value ]
Path to output FITS file.
*factor* [ value ]
Size of blocks, in pixels, to average. File size will be reduced
by 1/factor squared. If the fixed_size option is used, factor is
the desired width of the output image.
Optional Arguments:
*fixed_size* [ True | False ]
Fixed-size option - specify output size of image, instead of the
size of blocks of pixels to be averaged
*debug_level* [ value ]
Turns on debugging to the specified level (1-4).
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
'''
command = "mShrink"
if fixed_size:
command += " -f"
if debug_level:
command += " -d %s" % str(debug_level)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(factor)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mShrink", p.stdout.read().strip())
def mSubimage(in_image, out_image, ra, dec, xsize, debug=False,
all_pixels=False, hdu=None, status_file=None, ysize=None):
'''
Creates a subimage (or "cutout") of a FITS file. To use mSubimage in
'pixel' mode, see mSubimage_pix
Required Arguments:
*in_image* [ value ]
Input FITS file.
*out_image* [ value ]
Path to output FITS file.
*ra* [ value ]
RA of center of output image.
*dec* [ value ]
Declination of center of output image.
*xsize* [ value ]
Width of output image in degrees.
Optional Arguments:
*debug* [ True | False ]
Turns on debugging.
*all_pixels* [ True | False ]
All pixels - Force retrieval of whole image (useful to extract an
entire HDU)
*hdu* [ value ]
Operate on the specified FITS header extension (HDU)
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
*ysize* [ value ]
Height of output image in degrees (default is equal to xsize).
'''
command = "mSubimage"
if debug:
command += " -d"
if all_pixels:
command += " -a"
if hdu:
command += " -h %s" % str(hdu)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(ra)
command += " " + str(dec)
command += " " + str(xsize)
if ysize:
command += " %s" % str(ysize)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mSubimage", p.stdout.read().strip())
def mSubimage_pix(in_image, out_image, xstartpix, ystartpix, xpixsize,
debug=False, hdu=None, status_file=None, ypixsize=None):
'''
Creates a subimage (or "cutout") of a FITS file ('pixel' mode)
Required Arguments:
*in_image* [ value ]
Input FITS file.
*out_image* [ value ]
Path to output FITS file.
*xstartpix* [ value ]
Pixel along the x-axis where the cutout image will begin
*ystartpix* [ value ]
Pixel along the y-axis where the cutout image will begin
*xpixsize* [ value ]
Width of output image in pixels
Optional Arguments:
*debug* [ True | False ]
Turns on debugging.
*hdu* [ value ]
Operate on the specified FITS header extension (HDU)
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
*ypixsize* [ value ]
Height of output image in pixels (default is equal to xpixsize).
'''
command = "mSubimage -p"
if debug:
command += " -d"
if hdu:
command += " -h %s" % str(hdu)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(in_image)
command += " " + str(out_image)
command += " " + str(xstartpix)
command += " " + str(ystartpix)
command += " " + str(xpixsize)
if ypixsize:
command += " %s" % str(ypixsize)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mSubimage_pix", p.stdout.read().strip())
def mSubset(images_table, template_header, subset_table, debug_level=None,
fast_mode=False, status_file=None):
'''
Generates a table of images that is a subset of the input table,
containing only those images that cover the area defined by a given FITS
header.
Required Arguments:
*images_table* [ value ]
ASCII table (generated by mImgtbl) containing metadata for image
collection.
*template_header* [ value ]
FITS header template defining the area of interest.
*subset_table* [ value ]
Path to output table, which will contain only those FITS images
covering the area defined by template_header.
Optional Arguments:
*debug_level* [ value ]
Turns on debugging to the specified level (1-3).
*fast_mode* [ True | False ]
Fast mode - input file must include corners (corners option in
mImgtbl) to utilize this mode.
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
'''
command = "mSubset"
if debug_level:
command += " -d %s" % str(debug_level)
if fast_mode:
command += " -f"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(images_table)
command += " " + str(template_header)
command += " " + str(subset_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mSubset", p.stdout.read().strip())
def mTANHdr(orig_header, new_header, debug=False, order=None, max_iter=None,
tolerance=None, status_file=None):
'''
Analyzes a template file and determines if there would be an adequate
equivalent distorted TAN projection, within a specified tolerance, and
outputs the alternate header. This header can be used in conjunction with
mProjectPP to produce a TAN plane image. This process is considerably
faster than projecting with the general purpose tool mProject.
Required Arguments:
*orig_header* [ value ]
Input FITS header
*new_header* [ value ]
Path to output header to be created
Optional Arguments:
*debug* [ True | False ]
Print additional debugging information to stdout.
*order* [ value ]
Order of output header polynomial focal plane distortions (default
= 4)
*max_iter* [ value ]
Maximum number of iteration attempts to produce header (default =
50)
*tolerance* [ value ]
Distortion tolerance value for acceptable output (default = 0.01)
*status_file* [ value ]
Output and errors are written to status_file instead of stdout.
'''
command = "mTANHdr"
if debug:
command += " -d"
if order:
command += " -o %s" % str(order)
if max_iter:
command += " -i %s" % str(max_iter)
if tolerance:
command += " -t %s" % str(tolerance)
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(orig_header)
command += " " + str(new_header)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mTANHdr", p.stdout.read().strip())
def mTblSort(in_table, column_name, out_table, debug=False):
'''
Sorts a table on numeric values.
Required Arguments:
*in_table* [ value ]
Path to input table
*column_name* [ value ]
Name of column to sort on (column must contain numeric values)
*out_table* [ value ]
Path to output table
Optional Arguments:
*debug* [ True | False ]
Turns on debugging
'''
command = "mTblSort"
if debug:
command += " -d"
command += " " + str(in_table)
command += " " + str(column_name)
command += " " + str(out_table)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mTblSort", p.stdout.read().strip())
def mTileHdr(orig_header, new_header, n_x, n_y, ix, iy, debug=False,
status_file=None, xpad=None, ypad=None):
'''
Takes a header template file and creates another which represents one of a
regular set of tiles covering the original. The user specifies the tile
gridding and which tile is desired.
Required Arguments:
*orig_header* [ value ]
ASCII header template from which to derive tiled headers
*new_header* [ value ]
Path to output header
*n_x* [ value ]
Number of tiles in the x-direction
*n_y* [ value ]
Number of tiles in the y-direction
*ix* [ value ]
Integer identifying the x location of the output tile on the grid
(counting from 0)
*iy* [ value ]
Integer identifying the y location of the output tile on the grid
(counting from 0)
Optional Arguments:
*debug* [ True | False ]
Turns on debugging.
*status_file* [ value ]
Output and errors are sent to status_file instead of to stdout
*xpad* [ value ]
Number of pixels to overlap tiles in the x direction (default is
0)
*ypad* [ value ]
Number of pixels to overlap tiles in the y direction (default is
0). Only used if xpad is present.
'''
command = "mTileHdr"
if debug:
command += " -d"
if status_file:
command += " -s %s" % str(status_file)
command += " " + str(orig_header)
command += " " + str(new_header)
command += " " + str(n_x)
command += " " + str(n_y)
command += " " + str(ix)
command += " " + str(iy)
if xpad:
command += " %s" % str(xpad)
if ypad:
command += " %s" % str(ypad)
p = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stderr = p.stderr.read()
if stderr:
raise Exception(stderr)
return status.parse_struct("mTileHdr", p.stdout.read().strip())
|
mit
| 3,596,807,204,537,435,000
| 32.998728
| 78
| 0.601075
| false
| 3.98445
| false
| false
| false
|
fhdk/pacman-mirrors
|
pacman_mirrors/functions/httpFn.py
|
1
|
7641
|
#!/usr/bin/env python
#
# This file is part of pacman-mirrors.
#
# pacman-mirrors is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pacman-mirrors is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pacman-mirrors. If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Frede Hundewadt <echo ZmhAbWFuamFyby5vcmcK | base64 -d>
"""Manjaro-Mirrors HTTP Functions"""
import collections
import filecmp
import json
import os
import requests
import ssl
import time
import urllib.request
import urllib.parse
from http.client import HTTPException
from os import system as system_call
from socket import timeout
from urllib.error import URLError
from pacman_mirrors import __version__
from pacman_mirrors.config import configuration as conf
from pacman_mirrors.constants import timezones
from pacman_mirrors.constants import txt
from pacman_mirrors.functions import fileFn
from pacman_mirrors.functions import jsonFn
from pacman_mirrors.functions import util
USER_AGENT = {"User-Agent": "{}{}".format(conf.USER_AGENT, __version__)}
def get_url_last_modifed(url: str) -> str:
x = requests.head(url)
return x.headers["last-modified"]
def download_mirrors(config: object) -> tuple:
"""Retrieve mirrors from manjaro.org
:param config:
:returns: tuple with bool for mirrors.json and status.json
:rtype: tuple
"""
fetchmirrors = False
fetchstatus = False
try:
# mirrors.json
req = urllib.request.Request(url=config["url_mirrors_json"],
headers=USER_AGENT)
with urllib.request.urlopen(req) as response:
mirrorlist = json.loads(response.read().decode("utf8"),
object_pairs_hook=collections.OrderedDict)
fetchmirrors = True
tempfile = config["work_dir"] + "/.temp.file"
jsonFn.json_dump_file(mirrorlist, tempfile)
filecmp.clear_cache()
if fileFn.check_file(conf.USR_DIR, folder=True):
if not fileFn.check_file(config["mirror_file"]):
jsonFn.json_dump_file(mirrorlist, config["mirror_file"])
elif not filecmp.cmp(tempfile, config["mirror_file"]):
jsonFn.json_dump_file(mirrorlist, config["mirror_file"])
os.remove(tempfile)
except (HTTPException, json.JSONDecodeError, URLError):
pass
try:
# status.json
req = urllib.request.Request(url=config["url_status_json"],
headers=USER_AGENT)
with urllib.request.urlopen(req) as response:
statuslist = json.loads(
response.read().decode("utf8"),
object_pairs_hook=collections.OrderedDict)
fetchstatus = True
jsonFn.write_json_file(statuslist, config["status_file"])
except (HTTPException, json.JSONDecodeError, URLError):
pass
# result
return fetchmirrors, fetchstatus
def get_ip_country() -> str:
"""
Get the user country from connection IP (might be VPN who knows)
:return: country name
"""
try:
return requests.get("https://get.geojs.io/v1/ip/country/full").text
except (URLError, HTTPException):
return ""
def get_mirror_response(url: str, config: object, tty: bool = False, maxwait: int = 2,
count: int = 1, quiet: bool = False, ssl_verify: bool = True) -> float:
"""Query mirror by downloading a file and measuring the time taken
:param config:
:param ssl_verify:
:param tty:
:param url:
:param maxwait:
:param count:
:param quiet:
:returns always return a float value with response time
"""
response_time = txt.SERVER_RES # prepare default return value
probe_stop = None
message = ""
context = ssl.create_default_context()
arch = "x86_64"
if config["x32"]:
arch = "i686"
probe_url = f"{url}{config['branch']}/core/{arch}/{config['test_file']}"
if not ssl_verify:
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
req = urllib.request.Request(url=probe_url, headers=USER_AGENT)
probe_start = time.time()
# noinspection PyBroadException
try:
for _ in range(count):
response = urllib.request.urlopen(req, timeout=maxwait, context=context)
_ = response.read()
probe_stop = time.time()
except URLError as err:
if hasattr(err, "reason"):
message = f"{err.reason} '{url}'"
elif hasattr(err, "code"):
message = f"{err.reason} '{url}'"
except timeout:
message = f"{txt.TIMEOUT} '{url}'"
except HTTPException:
message = f"{txt.HTTP_EXCEPTION} '{url}'"
except ssl.CertificateError:
message = f"{ssl.CertificateError} '{url}'"
except Exception as e:
message = f"{e} '{url}'"
if message and not quiet:
util.msg(message=message, urgency=txt.ERR_CLR, tty=tty, newline=True)
if probe_stop:
response_time = round((probe_stop - probe_start), 3)
return response_time
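# A minimal usage sketch; the config keys mirror the ones read above, while
# the mirror URL, branch and test file names are hypothetical.
def _example_probe_mirror():
    cfg = {"x32": False, "branch": "stable", "test_file": "core.db.tar.gz"}
    return get_mirror_response("https://mirror.example.org/manjaro/", cfg,
                               maxwait=2, quiet=True)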
def check_internet_connection(tty: bool = False, maxwait: int = 2) -> bool:
"""Check for internet connection
:param maxwait:
:param tty:
"""
resp = None
hosts = conf.INET_CONN_CHECK_URLS
for host in hosts:
# noinspection PyBroadException
try:
resp = urllib.request.urlopen(host, timeout=maxwait)
break
except Exception as e:
util.msg(f"{host} '{e}'", urgency=txt.WRN_CLR, tty=tty)
return bool(resp)
def ping_host(host: str, tty: bool = False, count: int = 1) -> bool:
"""Check a hosts availability
:param host:
:param count:
:param tty:
:rtype: boolean
"""
util.msg(f"ping {host} x {count}", urgency=txt.INF_CLR, tty=tty)
return system_call("ping -c{} {} > /dev/null".format(count, host)) == 0
def download_mirror_pool(config: object, tty: bool = False, quiet: bool = False) -> tuple:
"""Download updates from repo.manjaro.org
:param config:
:param quiet:
:param tty:
:returns: tuple with True/False for mirrors.json and status.json
:rtype: tuple
"""
result = None
connected = check_internet_connection(tty=tty)
if connected:
if not quiet:
util.msg(message=f"{txt.DOWNLOADING_MIRROR_FILE} {txt.REPO_SERVER}",
urgency=txt.INF_CLR,
tty=tty)
result = download_mirrors(config)
else:
if not fileFn.check_file(config["status_file"]):
if not quiet:
util.msg(message=f"{txt.MIRROR_FILE} {config['status_file']} {txt.IS_MISSING}",
urgency=txt.WRN_CLR,
tty=tty)
util.msg(message=f"{txt.FALLING_BACK} {conf.MIRROR_FILE}",
urgency=txt.WRN_CLR,
tty=tty)
result = (True, False)
if not fileFn.check_file(config["mirror_file"]):
if not quiet:
util.msg(message=f"{txt.HOUSTON}",
urgency=txt.HOUSTON,
tty=tty)
result = (False, False)
return result
|
gpl-3.0
| 6,548,004,164,943,366,000
| 34.050459
| 95
| 0.621646
| false
| 3.765895
| true
| false
| false
|
frjanibo/Stream
|
resources/site-packages/stream/scrapers/espoiler.py
|
1
|
5606
|
from stream import plugin
from stream.scrapers import scraper
from stream.ga import tracked
from stream.caching import cached_route
from stream.utils import ensure_fanart
from stream.library import library_context
from stream.utils import url_get
from bs4 import BeautifulSoup
import re
import requests
import json
BASE_URL = "http://www.espoilertv.com/"
HEADERS = {
"Referer": BASE_URL,
}
payload = {
'mail':plugin.get_setting("espoiler_user"),
'pass':plugin.get_setting("espoiler_pass")
}
s = requests.Session()
p = s.post(BASE_URL+'serv/asincrono/logOn.php', data=payload)
print p.content
@plugin.route("/espoiler/marcar/<idEpisodio>/<accion>")
def espoiler_marcar_visto(idEpisodio, accion):
import xbmc
r = s.post(BASE_URL+"api/v1/mitv", {'accion':accion, 'idEpisodio':idEpisodio} )
print r.content
xbmc.executebuiltin('Container.Refresh')
@scraper("Espoiler TV")
@plugin.route("/espoiler")
def espoiler_index():
print "espoilerTV!"
plugin.set_content("episodes")
yield {
"label": ">> Calendario",
"path": plugin.url_for("espoiler_calendario", dia=0),
'is_playable': False
}
r = s.get(BASE_URL + 'api/v1/mitv?grupo=porVer')
mitv = json.loads(r.content)
for serie in mitv['series']:
print serie['titulo']
print plugin.url_for("espoiler_ver_fuentes", capitulo=serie['idEpisodio'])
item = {}
item['label'] = '%s (S%sE%s)' % (serie['titulo'], serie['temporada'].zfill(2), serie['episodio'].zfill(2))
item['path'] = plugin.url_for("espoiler_ver_serie", titulo=serie['titBase'])
item['is_playable'] = False
item['replace_context_menu'] = True
yield item
@plugin.route("/espoiler/serie/<titulo>")
def espoiler_ver_serie(titulo):
print "espoiler_ver_serie %s" % titulo
plugin.set_content("episodes")
html_data = s.get( BASE_URL+"ficha/"+titulo )
soup = BeautifulSoup(html_data.content, "html5lib")
for node in soup.findAll('div',attrs={'class': re.compile(r".*\bepisodio\b.*")}):
print node
if node.div.input.has_attr('value'):
#print node.div.input['value']
divTitulo = node.findAll('div',attrs={'class': re.compile(r".*\btitulo\b.*")})[0].get_text()
visto = node.findAll('button',attrs={'class': re.compile(r".*\bvisto\b.*")})[0]['data-visto']
playcount = 0 if visto=='no' else 2
print visto + " " + str(playcount)
contextMenu = ("Marcar como visto", "XBMC.RunPlugin(%s)" % plugin.url_for("espoiler_marcar_visto", idEpisodio=node.div.input['value'], accion='visto' ))
if playcount > 0:
contextMenu = ("Marcar como NO visto", "XBMC.RunPlugin(%s)" % plugin.url_for("espoiler_marcar_visto", idEpisodio=node.div.input['value'], accion='noVisto' ))
yield {
'label': '%s - %s' % (node['id'],divTitulo),
'path': plugin.url_for("espoiler_ver_fuentes", capitulo=node.div.input['value']),
'is_playable': False,
'context_menu': [ contextMenu ],
'info':{
"episode": "la madre que lo pario",
'playcount': playcount
}
}
@plugin.route("/espoiler/fuentes/<capitulo>")
def espoiler_ver_fuentes(capitulo):
r = s.get(BASE_URL+"serv/asincrono/enlaces.php?id="+capitulo)
info = json.loads(r.content)
"""
yield {
'label': '%s (S%sE%s)' % (info['titSerie'], info['temporada'].zfill(2), info['episodio'].zfill(2)),
'path': plugin.url_for("espoiler_ver_fuentes", capitulo ),
'is_playable':False
}
"""
for fuente in info['vid']:
yield {
'label': '%s (%s,%s)' % (fuente['dominio'], fuente['descargas'], fuente['reportes']),
'path': plugin.url_for("espoiler_play", url=fuente['url']),
'is_playable': False
}
@plugin.route("/espoiler/play/<url>")
def espoiler_play( url ):
print "espoiler_play %s" % url
html_data = url_get( url, headers=HEADERS)
soup = BeautifulSoup(html_data, "html5lib")
def filter_Magnet(el):
return el.has_attr('href') and 'magnet:' in el['href']
nodes = soup.findAll(filter_Magnet)
for node in nodes:
yield {
'label': '%s' % node['href'],
'path': plugin.url_for("play", uri=node['href']),
'is_playable': True
}
@plugin.route("/espoiler/calendario/<dia>")
@cached_route(ttl=3000, content_type="episodes")
def espoiler_calendario(dia=0):
from datetime import date,timedelta
dia = int(dia)
hoy = date.today()
un_dia = timedelta(days=1)
hoy = hoy + un_dia*dia
yield {
'label': 'espoilerTV Inicio' ,
'path': plugin.url_for("espoiler_index"),
'is_playable': False
}
yield {
'label': '<<< ' + (hoy-un_dia).isoformat() ,
'path': plugin.url_for("espoiler_calendario", dia=dia-1),
'is_playable': False
}
r = s.get( BASE_URL+"api/v1/calendario?fecha="+hoy.isoformat() )
dayCalendar = json.loads(r.content)
for serie in dayCalendar['series']:
yield {
'label': '%s (S%sE%s)' % (serie['titulo'], serie['temporada'].zfill(2), serie['episodio'].zfill(2)),
'path': plugin.url_for("espoiler_ver_serie", titulo=serie['titBase']),
'is_playable': False
}
yield {
'label': '>>> '+(hoy+un_dia).isoformat() ,
'path': plugin.url_for("espoiler_calendario", dia=dia+1),
'is_playable': False
}
|
gpl-3.0
| 6,394,528,551,549,263,000
| 34.257862
| 173
| 0.588833
| false
| 3.061715
| false
| false
| false
|
vojtechtrefny/python-meh
|
meh/safe_string.py
|
1
|
2372
|
#
# Copyright (C) 2013 Red Hat, Inc.
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Vratislav Podzimek <vpodzime@redhat.com>
#
#
import sys
PY = int(sys.version.split('.')[0])
"""
This module provides a SafeStr class.
@see: SafeStr
"""
class SafeStr(str):
"""
String class that has a modified __add__ method so that ascii strings,
binary data represented as a byte string and unicode objects can be
safely appended to it (not causing traceback). BINARY DATA IS OMITTED.
"""
def __add__(self, other):
if PY > 2:
return SafeStr(str.__add__(self, str(other)))
if not (isinstance(other, str) or isinstance(other, unicode)):
if hasattr(other, "__str__"):
other = other.__str__()
else:
other = "OMITTED OBJECT WITHOUT __str__ METHOD"
if isinstance(other, unicode):
ret = SafeStr(str.__add__(self, other.encode("utf-8")))
else:
try:
# try to decode which doesn't cause traceback for utf-8 encoded
# non-ascii string and ascii string
other.decode("utf-8")
ret = SafeStr(str.__add__(self, other))
except UnicodeDecodeError:
# binary data, get the representation used by Python for
# non-ascii bytes
# hex(255) returns "0xff", we want "\xff"
other_hexa = (hex(ord(char)) for char in other)
other_backslashed = (hex_num.replace("0x", "\\x")
for hex_num in other_hexa)
other_repr = "".join(other_backslashed)
ret = SafeStr(str.__add__(self, other_repr))
return ret
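# Illustrative usage sketch, not part of the original module.  Under the
# Python 2 branch of SafeStr.__add__ above, unicode text is utf-8 encoded,
# undecodable bytes are replaced by their \xNN escapes, and arbitrary objects
# fall back to their __str__ method, so none of these appends raises.
def _safe_str_demo():
    s = SafeStr("traceback: ")
    s = s + u"caf\u00e9"   # unicode -> encoded to utf-8 before appending
    s = s + "\xff\xfe"     # undecodable bytes -> kept as backslash escapes
    s = s + 42             # non-string object -> appended via its __str__
    return s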
|
gpl-2.0
| 4,265,282,975,284,031,500
| 32.408451
| 79
| 0.602024
| false
| 4.082616
| false
| false
| false
|
kmaiti/AWSAutoScalingWithF5andCloudFormation
|
aws-autoscale-ec2-instance-modify.py
|
1
|
6954
|
#!/usr/bin/env python
"""
Purpose : Extract the next sequence number for an auto-scaled instance and set a new Name tag on the instance itself. The script runs from the new instance
          and takes its input from the command line instead of from a json file.
Future Plan :
         will associate the instance with a role-based IAM profile
Usage :
python ec2-autoscale-instance-modify.py -a <your aws access_key> -s <aws secret key> -g <auto scale group that used in cloudformation file> -r <region> -n <min_server_number> -c <customer> -t <uat/plab/prod> -p <appname> -d <domainname ie example.net>
"""
__author__ = "kama maiti"
__copyright__ = "Copyright 2016, AWS autoscaled instance tag modification project"
__credits__ = ["kamal maiti"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "kamal maiti"
__email__ = "kamal.maiti@gmail.com"
__status__ = "production/Non-production"
import re
import argparse
import boto.ec2.autoscale
from boto.ec2 import EC2Connection
import shlex, subprocess
akey = ""
skey = ""
ag = ""
rg = ""
min_num = ""
def find_server_number(str):
#Assuming first match only with consecutive three digits
match = []
match = re.findall(r'\d\d\d', str)
if match:
return match #will return a list containg server number
else:
return match #will return blank list
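# Hedged example, not part of the original script: find_server_number() only
# matches runs of three consecutive digits, so a made-up Name tag such as
# "cust-prod-web-101-example.net" yields ['101'], while a tag without any
# three-digit run yields an empty list.
def _demo_find_server_number():
    return (find_server_number("cust-prod-web-101-example.net"),
            find_server_number("cust-prod-web"))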
def main():
arg_parser = argparse.ArgumentParser(description='Read autoscale instance')
arg_parser.add_argument('-a', dest='akey',help='Provide AWS_ACCESS_KEY')
arg_parser.add_argument('-s', dest='skey',help='Provide AWS_SECRET_ACCESS_KEY')
arg_parser.add_argument('-g', dest='ag',help='Provide User provided autoscale group name')
arg_parser.add_argument('-r', dest='rg',help='Provide region name')
arg_parser.add_argument('-n', dest='min_num',help='Minimum Server name')
arg_parser.add_argument('-c', dest='customer',help='Name of the customer in short')
arg_parser.add_argument('-t', dest='servertype',help='Type of the server ie prod or uat or plab')
arg_parser.add_argument('-p', dest='purpose',help='Purpose of the Server')
arg_parser.add_argument('-d', dest='domain',help='Domain name that will be appended to server name')
args = arg_parser.parse_args()
#print(args)
access_key = args.akey
secret_key = args.skey
region = args.rg
group_name = str(args.ag)
min_server_num = int(args.min_num)
customer = str(args.customer)
servertype = str(args.servertype)
purpose = str(args.purpose)
domain = str(args.domain)
#created two objects below. One for autocale connection and another for ec2 instance
as_conn = boto.ec2.autoscale.connect_to_region(region, aws_access_key_id=access_key, aws_secret_access_key=secret_key)
ec2_conn = boto.ec2.connect_to_region(region, aws_access_key_id=access_key, aws_secret_access_key=secret_key)
try:
groups = as_conn.get_all_groups()
all_groups = [group.name for group in groups]
for g in all_groups:
if group_name in g: #searching autocaling group that we are concerned with. Note all autoscalling group name should be unique
FOUND_GROUP = g #FOUND_GROUP will save exact AG name. Note that exact AG name is not same as user provided name. It'll check if group_name is subset of g
FOUND_GROUP_WITH_DES = as_conn.get_all_groups(names=[FOUND_GROUP])[0]
instance_ids = [i.instance_id for i in FOUND_GROUP_WITH_DES.instances]
#reservations = ec2_conn.get_all_instances(instance_ids)
instances = ec2_conn.get_only_instances(instance_ids)
#instances = [i for r in reservations for i in r.instances]
lNameTag = []
#collect all tags of all instances and sort Name tags and save them in list.
for i,j in enumerate(instances):
a = instances[i].tags
lNameTag.append(a['Name'])
#process each instances and take their server number in one list
lServerNum = []
if not lNameTag: #checking if list is empty or not. If empty then this is first instance whose server num will be min_server_num
next_number = min_server_num
else:
for server in lNameTag: #iterating each value of "Name" tag
if not find_server_number(server): #if method find_server_number returns null list
next_number = min_server_num
else:
val = find_server_number(server) #got value like [u'101']. Below comand will remove [],' and u
actual_num=str(val).strip('[]').strip('u').strip('\'')
lServerNum.append(int(actual_num)) #actual_num is string, need to convert to int
if not lServerNum: #check if list of server number is blank or not
next_number = min_server_num
else:
maximum_number = max(lServerNum) #used max function to find out maximum number in the list
next_number = maximum_number + 1
#Now we need to save this next_number in a file so that we can collect it and send to other commands.
with open('/tmp/serverno','w') as fd: #created a file and save the number as string. Then read it and used later
fd.write(str(next_number))
with open('/tmp/serverno','r') as fd:
num=fd.read()
#Will modify tag of current instance. Let's build a new tag.
delm = "-" #Delimeter that will be used to join multiple string
seq = ( customer, servertype, purpose, num, domain) #created a tuple
new_tag = delm.join(seq) #joined tuple strings
with open('/tmp/nodename','w') as fd:
fd.write(str(new_tag))
#will extract current instance ID using curl. ie curl http://169.254.169.254/latest/meta-data/instance-id
#
cmd = 'curl http://169.254.169.254/latest/meta-data/instance-id'
#shlex is simple lexical analyser for splitting a large string into tokens
args = shlex.split(cmd) #args will have value like : ['curl', 'http://169.254.169.254/latest/meta-data/instance-id']
        output,error = subprocess.Popen(args,stdout = subprocess.PIPE, stderr= subprocess.PIPE).communicate() #output and error are saved in variables; communicate() executes the command
#o="i-fd96291f" #used for testing
cur_instance_reservation = ec2_conn.get_all_instances(instance_ids=output)
cur_instance = cur_instance_reservation[0].instances[0]
cur_instance.add_tag('Name', new_tag)
finally:
as_conn.close()
ec2_conn.close()
if __name__ == '__main__':
main()
|
gpl-3.0
| -2,848,600,508,939,780,600
| 53.755906
| 252
| 0.623095
| false
| 3.8
| false
| false
| false
|
GeographicaGS/Elcano-iepg
|
www-srv/src/scripts/popgdp_newyear.py
|
1
|
1294
|
new_year = 2014
input_file = '../csv/popgdp2014.csv'
import sys
sys.path.append('../../www-srv/src')
from maplex.maplexmodel import MaplexModel
mm = MaplexModel()
import csv
with open(input_file, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
sql = ""
for row in spamreader:
countryname = row[0]
gdp = row[1].replace(",",".")
population = row[2].replace(",",".")
if countryname.strip() == "":
continue
geoentity = mm.getIdGeoentityByName(countryname,2)
if not geoentity:
raise Exception('Not found geoentity for ' + countryname)
geoentity_id = geoentity[0]["id_geoentity"]
geoentity_names = mm.getGeoentityNames(geoentity_id,1)
if not geoentity_names:
raise Exception('Not found geoentity code name for ' + countryname)
geoentity_code = geoentity_names[0]["names"][0]
sql += "INSERT INTO iepg_data_redux.pob_pib (code,date_in,date_out,pib,population) VALUES ('%s','%s','%s',%f,%f);\n" % \
(geoentity_code,str(new_year) + "-01-01",\
str(new_year) + "-12-31",float(gdp)* (10**9),\
float(population)* (10**6))
print sql
|
gpl-3.0
| -5,986,356,302,128,184,000
| 28.431818
| 128
| 0.566461
| false
| 3.42328
| false
| false
| false
|
bmazin/ARCONS-pipeline
|
util/ObsFileSeqV2.py
|
1
|
33310
|
import os
import math
import time
import warnings
import numpy as np
from util import FileName
from util import ObsFile
from util import TCS
from interval import interval
import pyfits
import matplotlib.pyplot as plt
import pickle
from headers.DisplayStackHeaders import writeImageStack, readImageStack
class ObsFileSeq():
"""
Deal with a sequence of obsFiles, and present data as a set of
frames. Each frame has the telescope pointing at the same location,
as calculated by the TCS log file written by the data acquisition
system.
For a given run and date, consider the list of timeStamps as one
continuous observation, with the given name. Divide the observations
into a set of frames, with the maximum amount of seconds in one frame given
by dt
If the data acquisition system was doing a mosaic at the time, each
frame will contain data only for one telescope position. Two seconds after
each telescope move is masked, to give the telescope a chance to settle
in its new position.
One frame may contain data from more than one ObsFile.
getTargetList lists the timestamp (and target description) to show
the list of obsFiles included in this sequence.
getFrameList lists the information for each frame; its ra,dec offset,
and which obsFiles contribute to this frame.
plotLocations makes a simple png of the frame numbers plotted in
raOffset,decOffset from the initial position.
    executing just this file demonstrates calls to getTargetList and
plotLocations for a mosaic of the ring nebula.
"""
def __init__(self, name, run, date, timeStamps, dt):
"""
name -- a useful name for this set of objects
run -- the data campaign name (LICK2014)
date -- sundown date (20141020)
timeStamps -- the UTC date/time stamp
dt -- the maximum number of seconds for one frame
beamMapFile -- the beamMap file to use (None to use default)
"""
self.name = name
self.run = run
self.date = date
self.timeStamps = timeStamps
self.timeStamps.sort()
self.dt = dt
self.obsFiles = []
self.fileNames = []
self.obsFileUnixTimes = []
for timeStamp in self.timeStamps:
fn = FileName.FileName(run, date, timeStamp)
self.fileNames.append(fn)
of = ObsFile.ObsFile(fn.obs(), repeatable=True)
of.loadBeammapFile(fn.beammap())
of.loadBestWvlCalFile()
fn2 = FileName.FileName(run, date, "")
of.loadFlatCalFile(fn2.flatSoln())
try:
of.loadHotPixCalFile(fn.timeMask())
self.hotPixelsApplied = True
except:
self.hotPixelsApplied = False
self.obsFiles.append(of)
self.obsFileUnixTimes.append(of.getFromHeader('unixtime'))
self.tcs = TCS.TCS(run, date)
self.tcsDict = self.tcs.select(self.obsFiles[0], self.obsFiles[-1])
# each interval covers one obsFile
self.obsIntervals = []
for i in range(len(self.obsFiles)):
tStart = self.obsFiles[i].getFromHeader('unixtime')
tEndThis = tStart + self.obsFiles[i].getFromHeader('exptime')
if i < len(self.obsFiles) - 1:
tStartNext = self.obsFiles[i + 1].getFromHeader('unixtime')
tEnd = min(tEndThis, tStartNext)
else:
tEnd = tEndThis
self.obsIntervals.append(interval[tStart, tEnd])
self._defineFrames(dt)
# Default settings for astrometry
try:
self.setRm()
except:
# Fails if the telescope doesn't move every frame...
# Ignore this for now
pass
def setRaDrift(self, raDrift):
self.raArcsecPerSec = raDrift
def setScaleThetaDrift(self, moveList, driftList):
"""
Set scale and theta for this sequence.
input:
driftList is a list of dictionaries, containing iFrame, row, col
for the object found in two frames separated significantly in time
and with the same ra,dec offset
moveList is a list of dictionaries, containing iFrame, row, col
for the object found in two frames that are close in time
"""
# Calculate the scale and rotation with no drift
matchList = []
for frc in moveList:
iFrame = frc['iFrame']
matchList.append({'ra': self.tcsDict['raOffset'][iFrame] / 3600.0,
'dec': self.tcsDict['decOffset'][iFrame] / 3600.0,
'row': frc['row'],
'col': frc['col']})
scaleTheta = ObsFileSeq.getScaleTheta(matchList)
ct = math.cos(scaleTheta['theta'])
st = math.sin(scaleTheta['theta'])
# See how much row,col=0,0 moved between the two drift frames
ras = []
decs = []
times = []
for i in range(2):
row = driftList[i]['row']
col = driftList[i]['col']
print "i,row,col", i, row, col
ras.append((col * ct + row * st) / scaleTheta['scale'])
decs.append((-col * st + row * ct) / scaleTheta['scale'])
iFrame = driftList[i]['iFrame']
times.append(self.getTimeBySeq(iFrame))
self.raArcsecPerSec = 3600.0 * (ras[1] - ras[0]) / (times[1] - times[0])
print "ras=", ras
print "times=", times
print "raArcsecPerSec", self.raArcsecPerSec
self.rc0 = np.zeros((2, len(self.frameObsInfos)), dtype=np.float)
# Calculate the scale and rotation, including drift
matchList = []
t0 = self.getTimeBySeq(0)
for frc in moveList:
iFrame = frc['iFrame']
t = self.getTimeBySeq(iFrame)
raDrift = self.raArcsecPerSec * (t - t0)
ra = (self.tcsDict['raOffset'][iFrame] - raDrift) / 3600.0
matchList.append({'ra': ra,
'dec': self.tcsDict['decOffset'][iFrame] / 3600.0,
'row': frc['row'],
'col': frc['col']})
scaleTheta = ObsFileSeq.getScaleTheta(matchList)
# Find the row,col at the ra,dec of each frame
ct = math.cos(scaleTheta['theta'])
st = math.sin(scaleTheta['theta'])
for iFrame in range(len(self.frameObsInfos)):
t = self.getTimeBySeq(iFrame)
raDrift = self.raArcsecPerSec * (t - t0)
print "iFrame,raDrift", iFrame, raDrift
raOff = (self.tcsDict['raOffset'][iFrame] - raDrift) / 3600.0
deOff = (self.tcsDict['decOffset'][iFrame]) / 3600.0
# 0 for row; 1 for col
            self.rc0[0, iFrame] = (raOff * st + deOff * ct) / scaleTheta['scale']
            self.rc0[1, iFrame] = (raOff * ct - deOff * st) / scaleTheta['scale']
print "iFrame, raOffset, deOffset", iFrame, self.tcsDict['raOffset'][iFrame], self.tcsDict['decOffset'][
iFrame], raOff, deOff
# print "iFrame, raOff, deOff, row, col",iFrame,raOff,deOff, self.rc0[0,iFrame], self.rc0[1,iFrame]
# Numpy Kung-Fu here to subtract the minimum row,col
self.rc0 -= self.rc0.min(axis=1)[:, None]
# Calculate the size of the full image to include all pixels
self.nRowCol = self.rc0.max(axis=1)
self.nRowCol[0] += self.obsFiles[0].nRow
self.nRowCol[1] += self.obsFiles[0].nCol
self.nRowCol = np.ceil(self.nRowCol).astype(np.int)
@staticmethod
def getScaleTheta(matchList, flip=1):
"""
Calculate scale and theta for the measurement of an object in two
frames. The coordinate system is centered on the object.
col,row = position in pixel coordinates
ra,dec = position in sky coordinates
The transformation is specified with:
f = +1 or -1 to flip one axis:
col0,row0 is the location in pixel coordinates of the origin of
the sky coordinates, where ra,dec = 0,0
theta -- rotation angle
scale -- degrees/pixel
The transformation equations are:
col = col0 + (flip*ra*cos(theta) - dec*sin(theta)) / scale
        row = row0 + (flip*ra*sin(theta) + dec*cos(theta)) / scale
ra = ( col*cos(theta) + row*sin(theta)) * scale / flip
dec = (-col*sin(theta) + row*cos(theta)) * scale
input:
matchList is a list of dictionaries, containing ra, dec, row, col.
ra,dec is the location (in decimal degrees) of row,col=(0,0)
row,col is the location of the object in the frame
return:
a dictionary of scale (in degrees/pixel) and theta (radians)
"""
m0 = matchList[0]
m1 = matchList[1]
dra = m1['ra'] - m0['ra']
ddec = m1['dec'] - m0['dec']
dr = m1['row'] - m0['row']
dc = m1['col'] - m0['col']
theta = math.atan2((flip * dra * dr - ddec * dc), (ddec * dr + flip * dra * dc))
scale = math.sqrt((dra ** 2 + ddec ** 2) / (dc ** 2 + dr ** 2))
return {"scale": scale, "theta": theta}
def setTransform(self, scale, thetaDegrees, flip, rdot, cdot):
"""
"""
self.scale = scale # degrees/pixel
self.thetaDegrees = thetaDegrees
print "in setTransform: scale, thetaDegrees =",scale, thetaDegrees
self.ct = math.cos(math.radians(thetaDegrees))
self.st = math.sin(math.radians(thetaDegrees))
self.flip = flip
self.rdot = rdot
self.cdot = cdot
        # Keep track of the position of r,c=0,0 in each frame with the array self.rc0, where
        #   r0 = int(self.rc0[0, iFrame])
        #   c0 = int(self.rc0[1, iFrame])
self.rc0 = np.zeros((2, self.nFrames), dtype=float)
for iFrame in range(self.nFrames):
r0c0 = self.getR0C0(iFrame)
self.rc0[0, iFrame] = r0c0['row0']
self.rc0[1, iFrame] = r0c0['col0']
        # Numpy yoga to subtract the minimum row,col and set nRowCol
self.rc0 -= self.rc0.min(axis=1)[:, None]
self.nRowCol = self.rc0.max(axis=1)
self.nRowCol[0] += self.obsFiles[0].nRow
self.nRowCol[1] += self.obsFiles[0].nCol
self.nRowCol = np.ceil(self.nRowCol).astype(np.int)
print "end of setTransform: nRowCol=",self.nRowCol
def getR0C0(self, iFrame):
ra = self.tcsDict['raOffset'][iFrame]
dec = self.tcsDict['decOffset'][iFrame]
col = (self.flip * ra * self.ct - dec * self.st) / self.scale
row = (self.flip * ra * self.st + dec * self.ct) / self.scale
dt = self.tcsDict['timeOffset'][iFrame]
col0 = -col - dt*self.cdot
row0 = -row - dt*self.rdot
retval = dict(col0=col0, row0=row0)
print "iFrame=%3d ra(as)=%5.1f dec(as)=%5.1f row=%5.1f col=%5.1f dt=%9.3f col0=%5.1f row0=%5.1f"%(iFrame, ra, dec, row, col, dt, col0, row0)
return retval
def setRm(self,
degreesPerPixel=0.4 / 3600,
thetaDeg=0.0,
raArcsecPerSec=0.0,
verbose=False):
"""
Sets variables that will be used to offset frames
self.rdl is a list of raOffset, decOffset based on where the
telescope says it was pointing, adding in the drift in ra
self.rc0 is a list of the row,col locations
"""
if verbose:
print " arcsecPerPixel = ", degreesPerPixel * 3600
print "theta (degrees) = ", thetaDeg
print " raArcsecPerSec = ", raArcsecPerSec
self.degreesPerPixel = degreesPerPixel
self.thetaDeg = thetaDeg
self.raArcsecPerSec = raArcsecPerSec
theta = math.radians(thetaDeg)
sct = math.cos(theta) * degreesPerPixel
sst = math.sin(theta) * degreesPerPixel
self.rmPixToEq = np.array([[sct, -sst], [sst, sct]])
self.rmEqToPix = np.linalg.inv(self.rmPixToEq)
t0 = self.getTimeBySeq(0)
self.rdl = []
for iFrame in range(len(self.frameObsInfos)):
t = self.getTimeBySeq(iFrame)
raDrift = raArcsecPerSec * (t - t0)
raOff = (self.tcsDict['raOffset'][iFrame] - raDrift) / 3600.0
deOff = (self.tcsDict['decOffset'][iFrame]) / 3600.0
self.rdl.append([raOff, deOff])
self.rc0 = np.dot(self.rmEqToPix, np.array(self.rdl).transpose())
# Numpy Kung-Fu here to subtract the minimum row,col
self.rc0 -= self.rc0.min(axis=1)[:, None]
self.nRowCol = self.rc0.max(axis=1)
self.nRowCol[0] += self.obsFiles[0].nRow
self.nRowCol[1] += self.obsFiles[0].nCol
self.nRowCol = np.ceil(self.nRowCol).astype(np.int)
def makeMosaicImage(self, iFrameList=None, wvlBinRange=None,
verbose=False):
"""
create a mosaic image of the frames listed, in the wavelength bin range
input: iFrameList, default None uses all frames
wvlBinRange, default None uses all wavelength bins,
otherwise (wBinMin,wBinMax)
output: a numpy 2d of the counts/second image
"""
try:
self.cubes
except AttributeError:
if verbose:
print "ObsFileSeq.makeMosaicImage: loadSpectralCubes()"
self.loadSpectralCubes()
cubeSum = np.zeros((self.nRowCol[0], self.nRowCol[1]))
effIntTimeSum = np.zeros((self.nRowCol[0], self.nRowCol[1]))
nRowCube = self.obsFiles[0].nRow
nColCube = self.obsFiles[0].nCol
if iFrameList is None:
iFrameList = range(self.nFrames)
if wvlBinRange is None:
wBinMin = 0
wBinMax = self.cubes[0]['cube'].shape[2]
else:
wBinMin = wvlBinRange[0]
wBinMax = wvlBinRange[1]
for iFrame in iFrameList:
r0 = int(self.rc0[0, iFrame])
c0 = int(self.rc0[1, iFrame])
if verbose:
print "ObsFileSeq:makeMosaicImage: r0,c0=", r0, c0
# The third index here is where you select which wavelength bins
# to include
cubeSum[r0:r0 + nRowCube, c0:c0 + nColCube] += \
self.cubes[iFrame]['cube'][:, :, wBinMin:wBinMax].sum(axis=2)
effIntTimeSum[r0:r0 + nRowCube, c0:c0 + nColCube] += \
self.cubes[iFrame]['effIntTime'][:, :]
with np.errstate(divide='ignore'):
cps = cubeSum / effIntTimeSum
cps = np.nan_to_num(cps)
return cps
def __del__(self):
for of in self.obsFiles:
del of
def _defineFrames(self, dt):
self.dt = dt
mts = self.tcsDict['time']
# make a list of times:
# start of first observation, each move, end of last observation
times = np.zeros(len(mts) + 2)
times[1:-1] = mts
times[0] = self.obsFiles[0].getFromHeader('unixtime')
self.beginTime = times[0]
times[-1] = self.obsFiles[-1].getFromHeader('unixtime') + \
self.obsFiles[-1].getFromHeader('exptime')
# Divide these segments into lengths of dt. Exclude two seconds after
# each boundary, to let the telescope settle down after a move
self.frameIntervals = []
self.locationIdx = []
for i in range(len(times) - 1):
t0 = times[i] + 2
t1 = times[i + 1]
nSeg = int((t1 - t0) / dt) + 1
delta = (t1 - t0) / nSeg
for j in range(nSeg):
self.frameIntervals.append(interval[t0 + j * delta,
t0 + (j + 1) * delta])
self.locationIdx.append(i)
# For each frame, determine a list of:
# obsFile, firstSec, integrationTime
# In general, one frame will need more than one of these if it
# spans the time boundary between obsFiles
self.frameObsInfos = []
for iFrame in range(len(self.frameIntervals)):
frameObsInfo = []
thisInterval = self.frameIntervals[iFrame]
# see if this interval overlaps with an obsFile
for i, obsInterval in enumerate(self.obsIntervals):
overlap = thisInterval & obsInterval
if len(overlap) > 0:
tBeg = overlap[0][0]
tEnd = overlap[0][1]
integrationTime = tEnd - tBeg
firstSec = tBeg - \
self.obsFiles[i].getFromHeader('unixtime')
obs = self.obsFiles[i]
obsInfo = {"obs": obs,
"iObs": i,
"firstSec": firstSec,
"integrationTime": integrationTime}
frameObsInfo.append(obsInfo)
self.frameObsInfos.append(frameObsInfo)
self.nFrames = len(self.frameObsInfos)
def getTimeBySeq(self, iSeq):
"""
get the mean time of the frame
"""
foi = self.frameObsInfos[iSeq]
wtSum = 0
wSum = 0
for oi in foi:
w = oi['integrationTime']
t = self.obsFileUnixTimes[oi['iObs']] + oi['firstSec'] + \
0.5 * oi['integrationTime']
wtSum += w * t
wSum += w
meanTime = wtSum / float(wSum)
return meanTime
def getTargetList(self, printLines=True):
"""
        get list of information: the timestamp + target description in header
        default printLines=True to also print each line to stdout
return: a list of the lines
"""
retval = []
for i, timeStamp in enumerate(self.timeStamps):
target = self.obsFiles[i].getFromHeader('target')
line = "%2d %s %s" % (i, timeStamp, target)
if printLines:
print line
retval.append(line)
return retval
def getFrameList(self, printLines=True):
"""
returns a list of lines which describes the frames.
Each line has an index, the time (relative to the time of the first
frame), effective integration time, location number,
the (raOffset,decOffset), and a list of obs files used
in the frame.
"""
retval = []
for i, frameInterval in enumerate(self.frameIntervals):
t0 = frameInterval[0][0] - self.beginTime
t1 = frameInterval[0][1] - self.beginTime
dt = t1 - t0
locIdx = self.locationIdx[i]
xOff = self.tcsDict['raOffset'][locIdx]
yOff = self.tcsDict['decOffset'][locIdx]
obsFiles = ""
for frameObsInfo in self.frameObsInfos[i]:
obsFiles += " %d" % frameObsInfo['iObs']
fmt = "i=%3d begin=%8.2f expTime=%6.2f loc=%2d (%5.1f,%5.1f) %s"
line = fmt % (i, t0, dt, locIdx, xOff, yOff, obsFiles)
if printLines:
print line
retval.append(line)
return retval
def getFrameDict(self, printLines=True):
"""
        Pretty much the same as getFrameList except that it returns
        a dictionary for each frame
"""
frameInfo = []
for i, frameInterval in enumerate(self.frameIntervals):
t0 = frameInterval[0][0] - self.beginTime
t1 = frameInterval[0][1] - self.beginTime
dt = t1 - t0
locIdx = self.locationIdx[i]
xOff = self.tcsDict['raOffset'][locIdx]
yOff = self.tcsDict['decOffset'][locIdx]
meanTF = self.getTimeBySeq(i)
fI = {"iframe": i,
"begin": t0,
"expTime": dt,
"loc": locIdx,
"offsRA": xOff,
"offsDec": yOff,
"meanTime": meanTF}
# "obsFile":ofs.fileNames[i].obs(),
# "ob":ofs.obsFiles[i]}
if printLines:
print fI
frameInfo.append(fI)
self.frameDict = frameInfo
return frameInfo
def getSpectralCubeByFrame(self, iFrame, weighted=False,
fluxWeighted=False,
wvlStart=None, wvlStop=None,
wvlBinWidth=None, energyBinWidth=None,
wvlBinEdges=None, timeSpacingCut=None):
"""
return the spectral cube for this frame
call ObsFile.getSpectralCube for each ObsFile in this frame.
The dictionary returned copies 'wvlBinEdges' from the first ObsFile,
and sums the 'cube' and 'effIntTime' from all ObsFiles.
I left the print statements in to report progress, because this is
very slow.
"""
retval = None
thisInterval = self.frameIntervals[iFrame]
for i, ofInterval in enumerate(self.obsIntervals):
overlap = thisInterval & ofInterval
if len(overlap) > 0:
tBeg = overlap[0][0]
tEnd = overlap[0][1]
integrationTime = tEnd - tBeg
firstSec = tBeg - self.obsFiles[i].getFromHeader('unixtime')
obs = self.obsFiles[i]
obs.setWvlCutoffs(wvlLowerLimit=wvlStart,
wvlUpperLimit=wvlStop)
spectralCube = \
obs.getSpectralCube(firstSec=firstSec,
integrationTime=integrationTime,
weighted=weighted,
fluxWeighted=fluxWeighted,
wvlStart=wvlStart,
wvlStop=wvlStop,
wvlBinWidth=wvlBinWidth,
energyBinWidth=energyBinWidth,
wvlBinEdges=wvlBinEdges,
timeSpacingCut=timeSpacingCut
)
cube = spectralCube['cube']
wbe = spectralCube['wvlBinEdges']
eit = spectralCube['effIntTime']
if retval is None:
retval = {'cube': cube,
'wvlBinEdges': wbe,
'effIntTime': eit}
else:
retval['cube'] += cube
retval['effIntTime'] += eit
return retval
def loadSpectralCubes(self, weighted=False, fluxWeighted=False,
wvlStart=None, wvlStop=None,
wvlBinWidth=None, energyBinWidth=None,
wvlBinEdges=None, timeSpacingCut=None):
"""
calls getSpectralCubeByFrame on each iFrame, storing the
results in the list self.cubes
        use a pickle file named <name>-cubes.pkl as a buffer. If that file
exists, load the cubes from there, and save the cubes there
after loading. WARNING -- the settings are not stored, so
they are ignored when loading from the pickle file.
"""
cpfn = self.name + "-cubes.pkl"
if os.path.isfile(cpfn):
print "loadSpectralCubes: load from ", cpfn
self.cubes = pickle.load(open(cpfn, 'rb'))
else:
self.cubes = []
for iFrame in range(len(self.frameIntervals)):
print "now load spectral cube for iFrame=", iFrame
cube = self.getSpectralCubeByFrame(iFrame)
print "counts are ", cube['cube'].sum()
self.cubes.append(cube)
# self.cubes.append(self.getSpectralCubeByFrame(iFrame,
# weighted,
# fluxWeighted,
# wvlStart,
# wvlStop,
# wvlBinWidth,
# energyBinWidth,
# wvlBinEdges,
# timeSpacingCut))
print "counts read: ", self.cubes[-1]['cube'].sum()
pickle.dump(self.cubes, open(cpfn, 'wb'))
def makePngFileByInterval(self, thisInterval, wvMin=3000, wvMax=12000,
rateMax=None):
fn = "%s-%03d-%05d-%05d.png" % \
(self.name, thisInterval, int(wvMin), int(wvMax))
print "now make fn=", fn
cubeSum = self.cubes[thisInterval]['cube'].sum(axis=2)
effIntTimeSum = self.cubes[thisInterval]['effIntTime']
old_settings = np.seterr(all='ignore')
np.seterr(divide='ignore')
rate = np.nan_to_num(cubeSum / effIntTimeSum)
np.seterr(**old_settings)
print fn, rate.min(), rate.max()
plt.clf()
if rateMax is None:
rateMax = rate.max()
plt.pcolor(rate, cmap='hot', vmin=0, vmax=rateMax)
plt.colorbar()
try:
os.remove(fn)
except OSError:
pass
plt.title(fn)
plt.savefig(fn)
def makeAllFitsFiles(self, wvMin, wvMax):
self.loadSpectralCubes(wvlStart=wvMin, wvlStop=wvMax)
for interval in range(len(self.frameIntervals)):
self.makeFitsFileByInterval(interval, wvMin, wvMax)
return
def makeFitsFileByInterval(self, thisInterval, wvMin, wvMax):
fn = "%s-%03d-%05d-%05d.fit" % (self.name, thisInterval,
int(wvMin), int(wvMax))
print "now make fn=", fn
pixels = self.cubes[thisInterval]['cube'].sum(axis=2)
print "number of counts=", pixels.sum()
hdu = pyfits.PrimaryHDU(pixels)
try:
os.remove(fn)
except OSError:
pass
hdu.writeto(fn)
def loadImageStack(self, fileName, wvlStart=None, wvlStop=None,
weighted=True, fluxWeighted=False,
getRawCount=False, scaleByEffInt=True,
deadTime=100.e-6):
# If the file exists, read it out
if os.path.isfile(fileName):
return readImageStack(fileName)
# if the file doesn't exists, make it
else:
images = []
pixIntTimes = []
startTimes = []
endTimes = []
intTimes = []
for iFrame in range(len(self.frameIntervals)):
im_dict = self.getPixelCountImageByFrame(iFrame,
wvlStart, wvlStop,
weighted,
fluxWeighted,
getRawCount,
scaleByEffInt,
deadTime)
images.append(im_dict['image'])
pixIntTimes.append(im_dict['pixIntTime'])
startTimes.append(im_dict['startTime'])
endTimes.append(im_dict['endTime'])
intTimes.append(im_dict['intTime'])
writeImageStack(fileName, images, startTimes=startTimes,
endTimes=endTimes, intTimes=intTimes,
pixIntTimes=pixIntTimes, targetName=self.name,
run=self.run,
nFrames=len(self.frameIntervals),
wvlLowerLimit=wvlStart,
wvlUpperLimit=wvlStop, weighted=weighted,
fluxWeighted=fluxWeighted,
hotPixelsApplied=self.hotPixelsApplied,
maxExposureTime=self.dt,
tStamps=self.timeStamps)
# return {'images':images,'pixIntTimes':pixIntTimes,
# 'startTimes':startTimes,'endTimes':endTimes,'intTimes':intTimes}
return readImageStack(fileName)
def getPixelCountImageByFrame(self, iFrame, wvlStart=None, wvlStop=None,
weighted=True, fluxWeighted=True,
getRawCount=False, scaleByEffInt=True,
deadTime=100.e-6):
'''
This gets the i'th image
Inputs:
iFrame - which frame you want
wvlStart - starting wavelength range
wvlStop - ending wavelength range
weighted, fluxWeighted, getRawCount, scaleByEffInt -
options for obsFile.getPixelCountImage()
deadTime - for deadtime correcting image
Returns:
Dictionary with the following keys:
'image' - fully calibrated, corrected image.
scaled to the total integration time
deadTime corrected
'pixIntTime' - actual integration time for each pixel in image
'intTime' - length of exposure
'startTime' - beginning of image (unix time)
'endTime' - end of image. Might be different from
startTime+intTime if there's a break in the middle while
switching to a new obsFile
'''
retval = None
for obsInfo in self.frameObsInfos[iFrame]:
print obsInfo
obsInfo['obs'].setWvlCutoffs(wvlLowerLimit=wvlStart,
wvlUpperLimit=wvlStop)
im_dict = obsInfo['obs']. \
getPixelCountImage(obsInfo["firstSec"],
obsInfo["integrationTime"],
weighted,
fluxWeighted,
getRawCount,
scaleByEffInt=False)
# Do this manually so
# we can correct deadTime first
im = im_dict['image']
# print 'im: ', np.sum(im)
# Correct for dead time
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
'invalid value encountered in divide',
RuntimeWarning)
w_deadTime = \
1.0 - im_dict['rawCounts'] * deadTime / im_dict['effIntTimes']
im = im / w_deadTime
if scaleByEffInt:
# Correct for exposure time
im = im * obsInfo["integrationTime"] / im_dict['effIntTimes']
# Remove any funny values
im[np.invert(np.isfinite(im))] = 0.
# print '--> ', np.sum(im)
if retval is None:
retval = {'image': im, 'pixIntTime': im_dict['effIntTimes'],
'intTime': obsInfo["integrationTime"],
'startTime': self.frameIntervals[iFrame][0][0],
'endTime': self.frameIntervals[iFrame][0][1]}
else:
retval['image'] += im
retval['pixIntTime'] += im_dict['effIntTimes']
retval['intTime'] += obsInfo["integrationTime"]
return retval
def plotLocations(self, fileName=None):
plt.clf()
x = self.tcsDict['raOffset']
y = self.tcsDict['decOffset']
plt.plot(x, y)
for i in range(len(x)):
plt.text(x[i], y[i], str(i),
horizontalalignment="center",
verticalalignment="center")
plt.axes().set_aspect('equal', 'datalim')
plt.title(self.name)
plt.xlabel("raOffset (arcsec)")
plt.ylabel("decOffset (arcsec)")
print "in ObsFileSeq.plotLocations: fileName=", fileName
if not fileName:
plt.show()
else:
plt.savefig(fileName)
if __name__ == "__main__":
if 0:
name = 'ring-20141020'
run = "PAL2014"
date = "20141020"
tsl = [
'20141021-033954',
'20141021-034532',
'20141021-035035',
'20141021-035538',
'20141021-040041',
'20141021-040544',
'20141021-041047',
]
dt = 200
ofs = ObsFileSeq(name, run, date, tsl, dt)
print "Now call getTargetList"
ofs.getTargetList()
print "Now call getFrameList"
ofs.getFrameList()
ofs.plotLocations(name + ".png")
print "now get time of first frame"
for i in range(66):
print "i=", i, " time=", ofs.getTimeBySeq(i)
# apci = ofs.getAllPixelCountImages(getRawCount=True)
del ofs
|
gpl-2.0
| -2,156,267,733,733,224,700
| 41.379135
| 148
| 0.52918
| false
| 3.94528
| false
| false
| false
|
pfjel7/housing-insights
|
python/housinginsights/ingestion/CSVWriter.py
|
1
|
3899
|
"""
CSVWriter.py contains the CSVWriter class that is used to create a clean.psv
file that can later be used to load to the database.
"""
from csv import DictWriter
import os
import copy
import uuid
logging_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir, "logs"))
class CSVWriter(object):
"""
    Takes a row of data, plus the metadata about it, and creates a clean .psv
    file locally that can later be bulk-uploaded to the database.
"""
def __init__(self, meta, manifest_row, filename=None):
"""""
:param meta: the parsed json from the meta data containing the format
expected of each SQL table.
:param manifest_row: a dictionary from manifest.csv for the source file
currently being acted on.
:param filename: optional, filename of where to write the data.
The default is current directory temp_{tablename}.csv
"""
self.manifest_row = manifest_row
self.tablename = manifest_row['destination_table']
self.unique_data_id = manifest_row['unique_data_id']
self.meta = meta
self.fields = meta[self.tablename]['fields']
# DictWriter needs a list of fields, in order, with the same key as
# the row dict sql_fields could be used in the header row. Currently
# not using because psycopg2 doesn't like it.
self.csv_fields = []
self.sql_fields = []
for field in self.fields:
self.csv_fields.append(field['source_name'])
self.sql_fields.append(field['sql_name'])
# We always want to append this to every table. write() should also
# append this to provided data
self.dictwriter_fields = copy.copy(self.csv_fields)
# Add id column to every table
self.dictwriter_fields.append('id')
# By default, creates a temp csv file wherever the calling module was
# located
        self.filename = 'temp_{}.psv'.format(self.unique_data_id) if filename is None else filename
# remove any existing copy of the file so we are starting clean
self.remove_file()
#Using psycopg2 copy_from does not like having headers in the file. Commenting out
#self.file = open(self.filename, 'w', newline='')
#headerwriter = DictWriter(self.file, fieldnames = self.sql_fields, delimiter="|")
#headerwriter.writeheader()
#self.file.close()
#print("header written")
self.file = open(self.filename, 'a', newline='', encoding='utf-8')
self.writer = DictWriter(self.file, fieldnames=self.dictwriter_fields, delimiter="|")
def write(self, row):
"""
Writes the given row into the clean pipe-delimited file that will be
loaded into the database.
:param row: the given data row
:return: None
"""
row['unique_data_id'] = self.manifest_row['unique_data_id']
# Note to developers - if this row returns a key error due to an
# optional column, it means you need to have your cleaner add a 'null'
# value for that optional column.
# Generate a random uuid
row['id'] = str(uuid.uuid4())
self.writer.writerow(row)
    def open(self):
        """
        Opens the file for writing. Normally called by init, but can be called
        again by the user if they want to re-open the file for writing
        """
        # Re-open with the same settings used in __init__ (append mode, pipe-delimited)
        self.file = open(self.filename, 'a', newline='', encoding='utf-8')
        self.writer = DictWriter(self.file, fieldnames=self.dictwriter_fields, delimiter="|")
def close(self):
"""
Since we can't use a with statement in the object, it's the caller's
responsibility to manually close the file when they are done writing
"""
self.file.close()
# TODO should this be part of the __del__
def remove_file(self):
try:
os.remove(self.filename)
except OSError:
pass
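# Illustrative usage sketch, not part of the original module.  The meta and
# manifest_row structures are minimal assumptions based only on the keys this
# class reads; in the real pipeline they come from meta.json and manifest.csv.
def _demo_csv_writer():
    meta = {
        'demo_table': {
            'fields': [
                {'source_name': 'unique_data_id', 'sql_name': 'unique_data_id'},
                {'source_name': 'value', 'sql_name': 'value'},
            ]
        }
    }
    manifest_row = {'destination_table': 'demo_table',
                    'unique_data_id': 'demo_2018'}
    writer = CSVWriter(meta, manifest_row, filename='demo_clean.psv')
    writer.write({'value': '42'})  # unique_data_id and a random id are added
    writer.close()
    writer.remove_file()           # clean up the temporary demo file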
|
mit
| 3,770,622,406,761,371,600
| 36.133333
| 99
| 0.619646
| false
| 4.210583
| false
| false
| false
|
dani-i/bachelor-project
|
utils/charts/chart_entry.py
|
1
|
2892
|
class ChartEntry:
def __init__(self):
self._identifier = ''
self._x = -1
self._y = -1
self._confidence_interval_95 = -1
def __str__(self):
rez = '\n## Is valid : ' + str(self.is_valid()) + ' ##'
rez += '\nIdentifier : ' + self.identifier
rez += '\nX : ' + str(self.x)
rez += '\nY : ' + str(self.y)
rez += '\n+++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
return rez
##########################################################################
# identifier
@property
def identifier(self):
return self._identifier
    @identifier.setter
def identifier(self,
value: str):
if not isinstance(value, str) \
or value == '':
raise ValueError('Identifier must be string and not empty.')
self._identifier = value
##########################################################################
# x
@property
def x(self):
return self._x
@x.setter
def x(self,
value):
if isinstance(value, float):
value = int(round(value))
if not isinstance(value, int) or value < 0:
raise ValueError('X must be integer and >= 0.')
self._x = value
##########################################################################
# y
@property
def y(self):
return self._y
@y.setter
def y(self,
value):
if isinstance(value, float):
value = int(round(value))
if not isinstance(value, int) or value < 0:
raise ValueError('Y must be integer and >= 0.')
self._y = value
##########################################################################
# confidence_interval_95
@property
def confidence_interval_95(self):
return self._confidence_interval_95
@confidence_interval_95.setter
def confidence_interval_95(
self,
value):
if not isinstance(value, float) or value < 0:
raise ValueError('CI must be float and >= 0.')
self._confidence_interval_95 = value
##########################################################################
def is_valid(self):
"""
- Checks if valid
:return: - True if valid
- False otherwise
"""
if not isinstance(self.identifier, str) or self.identifier == '' \
or not isinstance(self.confidence_interval_95, float) \
or self.confidence_interval_95 < 0 \
or not isinstance(self.x, int) or self.x < 0 \
or not isinstance(self.y, int) or self.y < 0:
return False
return True
##########################################################################
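# Illustrative usage sketch, not part of the original module.  The values are
# made up; each setter validates its input and is_valid() only returns True
# once identifier, x, y and confidence_interval_95 have all been set.
def _demo_chart_entry():
    entry = ChartEntry()
    entry.identifier = 'accuracy'
    entry.x = 10
    entry.y = 87
    entry.confidence_interval_95 = 1.5
    return entry.is_valid()  # True: every field passed its setter check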
|
apache-2.0
| 1,384,881,015,369,096,700
| 25.290909
| 78
| 0.412517
| false
| 4.868687
| false
| false
| false
|
Oblivion1221/Pycodes
|
learn_py/pig_latin.py
|
1
|
1048
|
from __future__ import print_function
def consonant(c):
v = ['a', 'e', 'i', 'o', 'u','A', 'E', 'I', 'O', 'U']
if c not in v:
return True
else:
return False
def pig_latin_1(word):
if consonant(word[0]):
word1 = word[1:] + word[0] + 'a' + 'y'
return str(word1)
else:
return word + 'way'
def pig_latin(word):
if consonant(word[0]) and not consonant(word[1]):
return pig_latin_1(word)
elif consonant(word[0]) and consonant(word[1]):
word = word[2:] + word[0:2] + 'a' + 'y'
return word
else:
return word + 'way'
def pig_latin_sentence(sentence):
s1 = sentence.split(' ')
res = []
for word in s1:
res.append(pig_latin(word))
return ' '.join(res)
def test_pig_latin():
words = ['pig', 'banana', 'trash', 'happy', 'duck', 'glove', 'eat', 'omelet', 'are']
for word in words:
print(word, "->", pig_latin(word))
test_pig_latin()
print( pig_latin_sentence("I am talking in pig Latin"))
|
mit
| -4,320,447,383,732,179,500
| 25.225
| 88
| 0.528626
| false
| 2.879121
| false
| false
| false
|
pkgpl/IPythonProcessing
|
pkprocess/pkapp.py
|
1
|
6022
|
import numpy as np
from numba import jit
import scipy.signal
import scipy.interpolate
from .pkbase import *
@jit
def triang(L):
# generate triangle
w=np.zeros(L)
if L%2==0: # even L
for i in range(int(L/2)):
n=i+1
w[i]=(2.*n-1.)/L
for i in range(int(L/2),L):
n=i+1
w[i]=2.-(2.*n-1.)/L
else: # odd L
for i in range(int((L+1)/2)):
n=i+1
w[i]=2.*n/(L+1.)
for i in range(int((L+1)/2),L):
n=i+1
w[i]=2.-2.*n/(L+1.)
return w
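# Hedged sanity check, not part of the original module: for an odd length the
# triangle window peaks at 1.0 in the middle and is symmetric, which is the
# shape gain() below relies on when smoothing the squared trace for AGC.
def _demo_triang():
    w = triang(5)  # approximately [0.333, 0.667, 1.0, 0.667, 0.333]
    return w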
@jit
def gain(self,tpow=0,epow=0,agc=False,agc_gate=0.5,norm="rms"):
# Apply gain function
# tpow=0: t**tpow
# epow=0: exp(epow*t)
# agc=False: use automatic gain control (ignore tpow & epow)
# agc_gate: agc window size [seconds]
# norm='rms': normalize agc result by 'rms' or 'amplitude'
# output: gained SeismicTrace
trace = np.zeros_like(self.data)
data = self.data
ntr=get_ntr(self)
ns=get_ns(self)
dt=get_dt(self)
if not agc:
t = np.arange(ns)*dt
t = t**tpow * np.exp(t*epow)
for itr in range(ntr):
trace[itr,:] = data[itr,:]*t
else: # agc gain
L=agc_gate/dt+1
L=int(np.floor(L/2))
h=triang(2*L+1)
for k in range(ntr):
e=data[k,:]**2
rms=np.sqrt(np.convolve(e,h,'same'))
epsi=1.e-10*np.max(rms)
if epsi==0.: continue
op=rms/(rms**2+epsi)
trace[k,:]=data[k,:]*op
if norm=='amplitude': # normalize by amplitude
trace[k,:]/=np.max(np.abs(trace[k,:]))
elif norm=='rms':
trace[k,:]/=np.sqrt(np.sum(trace[k,:]**2)/ns)
out=SeismicTrace(self.header,trace,self.logs(),self.nmo_picks)
out.add_log("gain: tpow=%s epow=%s agc=%s agc_gate=%s norm=%s"%(tpow,epow,agc,agc_gate,norm))
return out
@jit
def bpfilter(self,cut_off):
# Band-pass filter
# cut_off: [min.freq, max.freq]: frequency range to pass
# output: band-pass filtered SeismicTrace
dt=get_dt(self)
nyq=0.5/dt
b,a=scipy.signal.butter(5,np.array(cut_off)/nyq,btype='band')
#w,h=scipy.signal.freqz(b,a)
trace=scipy.signal.lfilter(b,a,self.data,axis=1)
out=SeismicTrace(self.header,trace,self.logs(),self.nmo_picks)
out.add_log("bpfilter: %s"%cut_off)
return out
def stack(self):
# Stack NMO-corrected CMP gathers
# output: stacked SeismicTrace
cmps=get_key(self,'cdp')
cmpu=get_key_unique(self,'cdp')
ns=get_ns(self)
dt=get_dt(self)
ncdp=len(cmpu)
stacked=np.zeros((ncdp,ns))
#for i,icmp in enumerate(cmpu):
# su1=window(self,'cdp',icmp)
# stacked[i,:]=np.sum(su1.data,axis=0)
for i,su1 in enumerate(trace_split(self,'cdp')):
stacked[i,:]=np.sum(su1.data,axis=0)
head=np.zeros(stacked.shape[0],dtype=SU_HEADER_DTYPE)
head['ns']=np.ones(stacked.shape[0],dtype=np.int32)*ns
head['dt']=np.ones(stacked.shape[0],dtype=np.int32)*dt*1000000
head['cdp']=cmpu
fold_num = np.array([sum(icmp==cmps) for icmp in cmpu])
head['shortpad']=fold_num
out=SeismicTrace(head,stacked,self.logs(),self.nmo_picks)
out.add_log('stack')
return out
@jit
def stolt_mig(self,v,dx):
# Stolt migration of CMP stacked data
# v: constant velocity
# dx: CMP interval
# output: migrated SeismicTrace
# python port of ezfkmig from http://www.biomecardio.com
Dstacked=self.data.T
nt,ncdp=Dstacked.shape
dt=get_dt(self)
num_f_pts=nt
num_pts=num_f_pts
U_w_kx=np.fft.fftshift(np.fft.fft2(Dstacked,(num_f_pts,num_pts)))
# linear interpolation
omega=2.*np.pi*np.linspace(-0.5,0.5,num_f_pts)/dt
kx=2.*np.pi*np.linspace(-0.5,0.5,num_pts)/dx
vv=v/np.sqrt(2.)
kz=vv*np.sign(omega)*np.sqrt(kx**2+omega**2/vv**2)
func=scipy.interpolate.interp2d(omega,kx,np.real(U_w_kx))
ifunc=scipy.interpolate.interp2d(omega,kx,np.imag(U_w_kx))
U_kz_kx=func(kz,kx)+ifunc(kz,kx)*1.0j
Dmigrated=np.real(np.fft.ifft2(np.fft.ifftshift(U_kz_kx)))[:,:ncdp]
out=SeismicTrace(self.header,Dmigrated.T,self.logs(),self.nmo_picks)
out.add_log('stold_mig: v=%s dx=%s'%(v,dx))
return out
@jit
def kirchhoff1(image,gather,times,isx,igx,dt,tdelay):
nx=image.shape[0]
nz=image.shape[1]
ntr=gather.shape[0]
nt=gather.shape[1]
#cdef int ix,iz,itr,it
#cdef double ts,tg,amp
for itr in range(ntr):
for ix in range(nx):
for iz in range(nz):
ts=times[isx,ix,iz]
tg=times[igx[itr],ix,iz]
it=int((ts+tg+tdelay)/dt)
if it<nt:
amp=gather[itr,it]
image[ix,iz]+=amp
return image
@jit
def kirchhoff(sd,h,times,tdelay):
nx,nz=times[0].shape
image=np.zeros((nx,nz))
nt=get_ns(sd)
dt=get_dt(sd)
h_in_meter=h*1000.
gathers=trace_split(sd,"sx")
nshot=len(gathers)
for ishot,gather in enumerate(gathers):
if ishot %10 ==0:
print(ishot,nshot)
sx=get_key(gather,"sx")[0]
gx=np.array(get_key(gather,"gx"))
isx=int(sx/h_in_meter)
igx=(gx/h_in_meter).astype(np.int32)
image=kirchhoff1(image,gather.data,times,isx,igx,dt,tdelay)
return image
@jit
def moving_average2d(vel,r1,r2):
n1,n2=vel.shape
svel=np.empty_like(vel)
for i in range(n1):
for j in range(n2):
svel[i,j]=np.average(vel[max(0,i-r1):min(i+r1,n1),max(0,j-r2):min(j+r2,n2)])
return svel
@jit
def zdiff2(img):
dimg=np.zeros_like(img)
nz=img.shape[1]
for iz in range(1,nz-1):
dimg[:,iz]=img[:,iz-1]-2.*img[:,iz]+img[:,iz+1]
return dimg
@jit
def rmsvel(sd):
dt=get_dt(sd)
ns=get_ns(sd)
at=np.array(range(ns))*dt
dic=sd.nmo_picks
if len(dic)==0:
print("Please run this after velocity analysis!!")
return
ncmp=len(dic.keys())
v1=np.empty((ns,ncmp))
cmpnums=np.sort(dic.keys())
for icmp,cmpnum in enumerate(cmpnums):
vt=dic[cmpnum]
v=vt[0]
t=vt[1]
vinterp=np.interp(at,t,v)
v1[:,icmp]=vinterp
cmpmin=cmpnums.min()
cmpmax=cmpnums.max()
cmps=get_key_unique(sd,'cdp')
cmprange=[cmpn for cmpn in cmps if cmpmin<=cmpn and cmpn<=cmpmax]
vrms=np.empty((ns,len(cmprange)))
for it in range(ns):
vrms[it,:]=np.interp(cmprange,cmpnums,v1[it,:])
return vrms
@jit
def intervalvel(sd):
vrms=rmsvel(sd)
vmin=vrms.min()
vmax=vrms.max()
vrms=moving_average2d(vrms,50,20)
dt=get_dt(sd)
ns=get_ns(sd)
at=np.array(range(ns))*dt
vint=np.empty_like(vrms)
vint[0,:]=vrms[0,:]
for it in range(1,ns):
vint[it,:]=sqrt((vrms[it,:]**2*at[it]-vrms[it-1,:]**2*at[it-1])/(at[it]-at[it-1]))
return np.clip(vint,vmin,vmax)
|
gpl-3.0
| -7,563,901,515,248,345,000
| 24.302521
| 94
| 0.661574
| false
| 2.096797
| false
| false
| false
|
Forage/Gramps
|
gramps/webapp/grampsdb/view/note.py
|
1
|
6003
|
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 Douglas S. Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#
""" Views for Person, Name, and Surname """
## Gramps Modules
from webapp.utils import _, boolean, update_last_changed, StyledNoteFormatter, parse_styled_text, build_search
from webapp.grampsdb.models import Note
from webapp.grampsdb.forms import *
from webapp.libdjango import DjangoInterface
from webapp.dbdjango import DbDjango
## Django Modules
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.template import Context, RequestContext
## Globals
dji = DjangoInterface()
db = DbDjango()
snf = StyledNoteFormatter(db)
# add a note to a person:
# /note/add/person/c51759195496de06da3ca5ba2c1
def process_note_on_name(request, action, handle, order):
# add, edit, delete
raise Exception("testing")
def process_note(request, context, handle, act, add_to=None): # view, edit, save
"""
Process act on person. Can return a redirect.
"""
context["tview"] = _("Note")
context["tviews"] = _("Notes")
context["action"] = "view"
view_template = "view_note_detail.html"
if handle == "add":
act = "add"
if "action" in request.POST:
act = request.POST.get("action")
# Handle: edit, view, add, create, save, delete, share, save-share
if act == "share":
item, handle = add_to
context["pickform"] = PickForm("Pick note",
Note,
(),
request.POST)
context["object_handle"] = handle
context["object_type"] = item
return render_to_response("pick.html", context)
elif act == "save-share":
item, handle = add_to
pickform = PickForm("Pick note",
Note,
(),
request.POST)
if pickform.data["picklist"]:
parent_model = dji.get_model(item) # what model?
parent_obj = parent_model.objects.get(handle=handle) # to add
ref_handle = pickform.data["picklist"]
ref_obj = Note.objects.get(handle=ref_handle)
dji.add_note_ref(parent_obj, ref_obj)
dji.rebuild_cache(parent_obj) # rebuild cache
return redirect("/%s/%s%s#tab-notes" % (item, handle, build_search(request)))
else:
context["pickform"] = pickform
context["object_handle"] = handle
context["object_type"] = item
return render_to_response("pick.html", context)
elif act == "add":
note = Note(gramps_id=dji.get_next_id(Note, "N"))
notetext = ""
noteform = NoteForm(instance=note, initial={"notetext": notetext})
noteform.model = note
elif act in ["view", "edit"]:
note = Note.objects.get(handle=handle)
genlibnote = db.get_note_from_handle(note.handle)
notetext = snf.format(genlibnote)
noteform = NoteForm(instance=note, initial={"notetext": notetext})
noteform.model = note
elif act == "save":
note = Note.objects.get(handle=handle)
notetext = ""
noteform = NoteForm(request.POST, instance=note, initial={"notetext": notetext})
noteform.model = note
if noteform.is_valid():
update_last_changed(note, request.user.username)
notedata = parse_styled_text(noteform.data["notetext"])
note.text = notedata[0]
note = noteform.save()
dji.save_note_markup(note, notedata[1])
dji.rebuild_cache(note)
notetext = noteform.data["notetext"]
act = "view"
else:
notetext = noteform.data["notetext"]
act = "edit"
elif act == "create":
note = Note(handle=create_id())
notetext = ""
noteform = NoteForm(request.POST, instance=note, initial={"notetext": notetext})
noteform.model = note
if noteform.is_valid():
update_last_changed(note, request.user.username)
notedata = parse_styled_text(noteform.data["notetext"])
note.text = notedata[0]
note = noteform.save()
dji.save_note_markup(note, notedata[1])
dji.rebuild_cache(note)
if add_to:
item, handle = add_to
model = dji.get_model(item)
obj = model.objects.get(handle=handle)
dji.add_note_ref(obj, note)
dji.rebuild_cache(obj)
return redirect("/%s/%s#tab-notes" % (item, handle))
notetext = noteform.data["notetext"]
act = "view"
else:
notetext = noteform.data["notetext"]
act = "add"
elif act == "delete":
# FIXME: delete markup too for this note
note = Note.objects.get(handle=handle)
note.delete()
return redirect("/note/")
else:
raise Exception("Unhandled act: '%s'" % act)
context["noteform"] = noteform
context["object"] = note
context["notetext"] = notetext
context["note"] = note
context["action"] = act
return render_to_response(view_template, context)
|
gpl-2.0
| -3,609,075,325,010,903,600
| 37.480769
| 110
| 0.596202
| false
| 3.801773
| false
| false
| false
|
unicef/un-partner-portal
|
backend/unpp_api/apps/common/models.py
|
1
|
3927
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
from django.conf import settings
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator, FileExtensionValidator
from imagekit.models import ImageSpecField
from model_utils.models import TimeStampedModel
from pilkit.processors import ResizeToFill
from common.countries import COUNTRIES_ALPHA2_CODE, COUNTRIES_ALPHA2_CODE_DICT
class PointQuerySet(models.QuerySet):
def get_point(self, lat=None, lon=None, admin_level_1=None):
admin_level_1, _ = AdminLevel1.objects.get_or_create(
name=admin_level_1.get('name'),
country_code=admin_level_1['country_code'],
)
point, _ = self.get_or_create(lat=lat, lon=lon, admin_level_1=admin_level_1)
return point
class AdminLevel1(models.Model):
"""
Admin level 1 - is like California in USA or Mazowieckie in Poland
"""
name = models.CharField(max_length=255, null=True, blank=True)
country_code = models.CharField(max_length=3, choices=COUNTRIES_ALPHA2_CODE)
class Meta:
ordering = ['id']
unique_together = ('name', 'country_code')
def __str__(self):
return f"[{self.country_name}] {self.name}"
@property
def country_name(self):
return COUNTRIES_ALPHA2_CODE_DICT[self.country_code]
class Point(models.Model):
lat = models.DecimalField(
verbose_name='Latitude',
null=True,
blank=True,
max_digits=8,
decimal_places=5,
validators=[MinValueValidator(Decimal(-90)), MaxValueValidator(Decimal(90))]
)
lon = models.DecimalField(
verbose_name='Longitude',
null=True,
blank=True,
max_digits=8,
decimal_places=5,
validators=[MinValueValidator(Decimal(-180)), MaxValueValidator(Decimal(180))]
)
admin_level_1 = models.ForeignKey(AdminLevel1, related_name="points")
objects = PointQuerySet.as_manager()
class Meta:
ordering = ['id']
def __str__(self):
return "Point <pk:{}>".format(self.id)
class Sector(models.Model):
name = models.CharField(max_length=255)
class Meta:
ordering = ['id']
def __str__(self):
return "Sector: {} <pk:{}>".format(self.name, self.id)
class Specialization(models.Model):
name = models.CharField(max_length=255)
category = models.ForeignKey(Sector, related_name="specializations")
class Meta:
ordering = ['id']
def __str__(self):
return f'<{self.pk}> {self.category.name}: {self.name}'
class CommonFile(TimeStampedModel):
file_field = models.FileField(validators=(
FileExtensionValidator(settings.ALLOWED_EXTENSIONS),
))
# Only applicable for image files
__thumbnail = ImageSpecField(
source='file_field',
processors=[
ResizeToFill(150, 75)
],
format='JPEG',
options={
'quality': 80
},
)
class Meta:
ordering = ['id']
def __str__(self):
return f"CommonFile [{self.pk}] {self.file_field}"
@property
def thumbnail_url(self):
"""
Done this way to fail gracefully when trying to get thumbnail for non-image file
"""
try:
return self.__thumbnail.url
except OSError:
return None
@property
def has_existing_reference(self):
"""
Returns True if this file is referenced from at least one other object
"""
for attr_name in dir(self):
if attr_name == CommonFile.has_existing_reference.fget.__name__ or not hasattr(self, attr_name):
continue
attribute = getattr(self, attr_name)
if callable(getattr(attribute, 'exists', None)) and attribute.exists():
return True
return False
|
apache-2.0
| -2,077,369,729,848,358,400
| 27.251799
| 108
| 0.624141
| false
| 3.872781
| false
| false
| false
|
Zanzibar82/script.module.urlresolver
|
lib/urlresolver/plugins/exashare.py
|
1
|
6489
|
"""
Exashare.com urlresolver XBMC Addon
Copyright (C) 2014 JUL1EN094
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib,urllib2,os,re,xbmc
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import SiteAuth
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
class ExashareResolver(Plugin,UrlResolver,PluginSettings):
implements = [UrlResolver,SiteAuth,PluginSettings]
name = "exashare"
domains = [ "exashare.com" ]
profile_path = common.profile_path
cookie_file = os.path.join(profile_path,'%s.cookies'%name)
USER_AGENT = 'Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:30.0) Gecko/20100101 Firefox/30.0'
def __init__(self):
p=self.get_setting('priority') or 100
self.priority=int(p)
self.net=Net()
#UrlResolver methods
def get_media_url(self, host, media_id):
base_url = 'http://www.' + host + '.com/' + media_id
headers = {'User-Agent': self.USER_AGENT, 'Referer': 'http://www.' + host + '.com/'}
try: html = self.net.http_GET(base_url).content
except: html = self.net.http_GET(base_url, headers=headers).content
if re.search("""File Not Found""", html):
raise UrlResolver.ResolverError('File not found or removed')
POST_Url = re.findall('form method="POST" action=\'(.*)\'',html)[0]
POST_Selected = re.findall('form method="POST" action=(.*)</Form>',html,re.DOTALL)[0]
POST_Data = {}
POST_Data['op'] = re.findall('input type="hidden" name="op" value="(.*)"',POST_Selected)[0]
POST_Data['usr_login'] = re.findall('input type="hidden" name="usr_login" value="(.*)"',POST_Selected)[0]
POST_Data['id'] = re.findall('input type="hidden" name="id" value="(.*)"',POST_Selected)[0]
POST_Data['fname'] = re.findall('input type="hidden" name="fname" value="(.*)"',POST_Selected)[0]
POST_Data['referer'] = re.findall('input type="hidden" name="referer" value="(.*)"',POST_Selected)[0]
POST_Data['hash'] = re.findall('input type="hidden" name="hash" value="(.*)"',POST_Selected)[0]
POST_Data['imhuman'] = 'Proceed to video'
try : html2 = self.net.http_POST(POST_Url,POST_Data).content
except : html2 = self.net.http_POST(POST_Url,POST_Data,headers=headers).content
stream_url = re.findall('file:\s*"([^"]+)"', html2)[0]
if self.get_setting('login') == 'true':
cookies = {}
for cookie in self.net._cj:
cookies[cookie.name] = cookie.value
if len(cookies) > 0:
stream_url = stream_url + '|' + urllib.urlencode({'Cookie': urllib.urlencode(cookies)})
common.addon.log('stream_url : ' + stream_url)
xbmc.sleep(7000)
return stream_url
def get_url(self,host,media_id):
return 'http://www.exashare.com/%s' % media_id
def get_host_and_id(self,url):
r=re.search('http://(?:www.)?(.+?).com/(?:embed\-)?([0-9A-Za-z_]+)(?:\-[0-9]+x[0-9]+.html)?',url)
if r:
ls=r.groups()
return ls
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled')=='false' or self.get_setting('login')=='false':
return False
return re.match('http://(?:www.)?exashare.com/(?:embed\-)?[0-9A-Za-z]+(?:\-[0-9]+x[0-9]+.html)?',url) or 'exashare.com' in host
#SiteAuth methods
def needLogin(self):
url='http://www.exashare.com/?op=my_account'
if not os.path.exists(self.cookie_file):
common.addon.log_debug('needLogin returning True')
return True
self.net.set_cookies(self.cookie_file)
source=self.net.http_GET(url).content
if re.search("""Your username is for logging in and cannot be changed""",source):
common.addon.log_debug('needLogin returning False')
return False
else:
common.addon.log_debug('needLogin returning True')
return True
def login(self):
if (self.get_setting('login')=='true'):
if self.needLogin():
common.addon.log('logging in exashare')
url='http://www.exashare.com/'
data={'login':self.get_setting('username'),'password':self.get_setting('password'),'op':'login','redirect':'/login.html'}
headers={'User-Agent':self.USER_AGENT,'Referer':url}
try: source=self.net.http_POST(url,data).content
except: source=self.net.http_POST(url,data,headers=headers).content
if re.search('Your username is for logging in and cannot be changed',source):
common.addon.log('logged in exashare')
self.net.save_cookies(self.cookie_file)
self.net.set_cookies(self.cookie_file)
return True
else:
common.addon.log('error logging in exashare')
return False
else:
if os.path.exists(self.cookie_file): os.remove(self.cookie_file)
return False
#PluginSettings methods
def get_settings_xml(self):
xml = PluginSettings.get_settings_xml(self)
xml += '<setting id="ExashareResolver_login" '
xml += 'type="bool" label="Login" default="false"/>\n'
xml += '<setting id="ExashareResolver_username" enable="eq(-1,true)" '
xml += 'type="text" label=" username" default=""/>\n'
xml += '<setting id="ExashareResolver_password" enable="eq(-2,true)" '
xml += 'type="text" label=" password" option="hidden" default=""/>\n'
return xml
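# Illustrative sketch (not part of the original plugin): the same pattern that
# get_host_and_id() uses, applied to a made-up embed URL (the media id below
# is not a real one).
def _example_parse_url():
    r = re.search('http://(?:www.)?(.+?).com/(?:embed\-)?([0-9A-Za-z_]+)(?:\-[0-9]+x[0-9]+.html)?',
                  'http://www.exashare.com/embed-abc123xyz-640x360.html')
    return r.groups() if r else False   # -> ('exashare', 'abc123xyz')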
|
gpl-2.0
| -6,906,511,386,082,544,000
| 48.159091
| 137
| 0.601788
| false
| 3.674405
| false
| false
| false
|
cc-it/odoo_mod
|
currency_rate_update/services/update_service_CA_BOC.py
|
1
|
4006
|
# -*- coding: utf-8 -*-
# © 2009 Camptocamp
# © 2014 Daniel Dico
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from .currency_getter_interface import CurrencyGetterInterface
from openerp import _
from openerp.exceptions import except_orm
import logging
_logger = logging.getLogger(__name__)
class CA_BOCGetter(CurrencyGetterInterface):
"""Implementation of Curreny_getter_factory interface
for Bank of Canada RSS service
"""
# Bank of Canada is using RSS-CB
# http://www.cbwiki.net/wiki/index.php/Specification_1.1
# This RSS format is used by other national banks
# (Thailand, Malaysia, Mexico...)
code = 'CA_BOC'
name = 'Bank of Canada - noon rates'
supported_currency_array = [
"AED", "ANG", "ARS", "AUD", "BOC", "BRL", "BSD", "CHF", "CLP", "CNY",
"COP", "CZK", "DKK", "EUR", "FJD", "GBP", "GHS", "GTQ", "HKD", "HNL",
"HRK", "HUF", "IDR", "ILS", "INR", "ISK", "JMD", "JPY", "KRW", "LKR",
"MAD", "MMK", "MXN", "MYR", "NOK", "NZD", "PAB", "PEN", "PHP", "PKR",
"PLN", "RON", "RSD", "RUB", "SEK", "SGD", "THB", "TND", "TRY", "TTD",
"TWD", "USD", "VEF", "VND", "XAF", "XCD", "XPF", "ZAR"]
def get_updated_currency(self, currency_array, main_currency,
max_delta_days):
"""implementation of abstract method of Curreny_getter_interface"""
# as of Jan 2014 BOC is publishing noon rates for about 60 currencies
url = ('http://www.bankofcanada.ca/stats/assets/'
'rates_rss/noon/en_%s.xml')
# closing rates are available as well (please note there are only 12
# currencies reported):
# http://www.bankofcanada.ca/stats/assets/rates_rss/closing/en_%s.xml
# We do not want to update the main currency
if main_currency in currency_array:
currency_array.remove(main_currency)
import feedparser
import pytz
from dateutil import parser
for curr in currency_array:
_logger.debug("BOC currency rate service : connecting...")
dom = feedparser.parse(url % curr)
self.validate_cur(curr)
# check if BOC service is running
if dom.bozo and dom.status != 404:
_logger.error("Bank of Canada - service is down - try again\
later...")
# check if BOC sent a valid response for this currency
if dom.status != 200:
_logger.error("Exchange data for %s is not reported by Bank\
of Canada." % curr)
raise except_orm(_('Error !'), _('Exchange data for %s is not '
'reported by Bank of Canada.'
% str(curr)))
_logger.debug("BOC sent a valid RSS file for: " + curr)
# check for valid exchange data
if (dom.entries[0].cb_basecurrency == main_currency) and \
(dom.entries[0].cb_targetcurrency == curr):
rate = dom.entries[0].cb_exchangerate.split('\n', 1)[0]
rate_date_datetime = parser.parse(dom.entries[0].updated)\
.astimezone(pytz.utc).replace(tzinfo=None)
self.check_rate_date(rate_date_datetime, max_delta_days)
self.updated_currency[curr] = rate
_logger.debug("BOC Rate retrieved : %s = %s %s" %
(main_currency, rate, curr))
else:
_logger.error(
"Exchange data format error for Bank of Canada -"
"%s. Please check provider data format "
"and/or source code." % curr)
raise except_orm(_('Error !'),
_('Exchange data format error for '
'Bank of Canada - %s !' % str(curr)))
return self.updated_currency, self.log_info
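# Illustrative sketch (not part of the original module): reading one entry of
# the RSS-CB noon-rate feed with feedparser, outside of the Odoo update cycle.
# It assumes the Bank of Canada still publishes the feed at this URL; the
# cb_* fields are the same RSS-CB extensions used above.
def _example_read_noon_rate(currency='USD'):
    import feedparser
    url = ('http://www.bankofcanada.ca/stats/assets/'
           'rates_rss/noon/en_%s.xml' % currency)
    dom = feedparser.parse(url)
    if not dom.entries:
        return None
    entry = dom.entries[0]
    rate = entry.cb_exchangerate.split('\n', 1)[0]
    return entry.cb_basecurrency, entry.cb_targetcurrency, rate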
|
mit
| -7,019,638,201,694,220,000
| 40.708333
| 79
| 0.539461
| false
| 3.724651
| false
| false
| false
|
MillerCMBLabUSC/lab_analysis
|
apps/4f_model/OldCode/IPCalc.py
|
1
|
7846
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 8 10:51:21 2017
@author: jlashner
"""
from pylab import *
import tmm
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as intg
"""
Constants and System Parameters
"""
#speed of light [m/s]
c = 2.99792458 * 10**8
GHz = 10 ** 9
"""
Helpful functions to calculate IP / AR coating
"""
def getIP(n, d, freq, theta):
"""
Gets IP of optical elements
Parameters
----------
n : list
Index of refraction for each element in the stack
d : list
Thickness for each element in the stack
freq : float [Hz]
Frequency
theta:
Incident angle
"""
lam_vac = c / freq * 1000.
# lam_vac = 2.0
s = tmm.coh_tmm('s',n, d,theta,lam_vac)
p = tmm.coh_tmm('p',n, d,theta,lam_vac)
return -(s['T']-p['T'])/2
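# Illustrative usage sketch (not part of the original module): band-center IP
# of a bare (uncoated) 5 mm window with n ~ 1.5 at 145 GHz and a 10 degree
# incidence angle. Thicknesses are in mm, matching the units getIP() expects.
def _example_window_ip():
    n_stack = [1.0, 1.5 + 0.0001j, 1.0]
    d_stack = [Inf, 5.0, Inf]
    return getIP(n_stack, d_stack, 145.0 * GHz, np.deg2rad(10.0))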
def getPolAbs(n, d, freq, theta):
"""
Gets Polarized Absorption of optical elements
Parameters
----------
n : list
Index of refraction for each element in the stack
d : list
Thickness for each element in the stack
freq : float [Hz]
Frequency
theta:
Incident angle
"""
lam_vac = c / freq * 1000.
s = tmm.coh_tmm('s',n, d,theta,lam_vac)
p = tmm.coh_tmm('p',n, d,theta,lam_vac)
sA = 1 - s['T'] - s['R']
pA = 1 - p['T'] - p['R']
return -((sA - pA)/2)
def getBandAverage(n, d, freq, fbw, theta, divisions=100):
"""
Gets band averaged IP of stack
Parameters
----------
n : list
Index of refraction for each element in the stack
d : list
Thickness for each element in the stack
freq :
Band center
fbw:
Fractional bandwidth
theta:
Incident angle
"""
flo = freq * (1. - .5 * fbw)
fhi = freq * (1. + .5 * fbw)
fs = np.linspace(flo, fhi, divisions)
ips = np.array(map(lambda x : getIP(n,d,x, theta), fs))
return trapz(ips, fs) / (fhi - flo)
def getBandAverageAbs(n, d, freq, fbw, theta, divisions=100):
"""
Gets band averaged polarized absorption of stack
Parameters
----------
n : list
Index of refraction for each element in the stack
d : list
Thickness for each element in the stack
freq :
Band center
fbw:
Fractional bandwidth
theta:
Incident angle
"""
flo = freq * (1. - .5 * fbw)
fhi = freq * (1. + .5 * fbw)
fs = np.linspace(flo, fhi, divisions)
ips = np.array(map(lambda x : getPolAbs(n,d,x, theta), fs))
return trapz(ips, fs) / (fhi - flo)
def ARCoat(n, lam0):
"""
Gets Index of refraction and thickness for AR coating
Parameters
----------
n : float
Index of refraction of element to be coated
lam0 : float
Optimized Wavelength [mm]
"""
ni= .00008
nAR = [real(n)**(1./3) + ni*1j, real(n)**(2./3) + ni * 1j]
dAR = map(lambda x : lam0 / (4.0 * real(x)), nAR)
return nAR, dAR
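# Illustrative sketch (not part of the original module): the two-layer
# quarter-wave AR coating ARCoat() builds for an alumina-like index (n ~ 3.1)
# at a 2.5 mm design wavelength. Layer indices step as n**(1/3) and n**(2/3);
# each layer is lam0 / (4 * n_layer) thick, in mm.
def _example_ar_coat():
    nAR, dAR = ARCoat(3.1 + 0.00008j, 2.5)
    return zip(map(real, nAR), dAR)   # roughly [(1.46, 0.43), (2.13, 0.29)]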
def ARCoatOld(n, lam0):
"""
Gets Index of refraction and thickness for AR coating
Parameters
----------
n : float
Index of refraction of element to be coated
lam0 : float
Optimized Wavelength [mm]
"""
nAR = [real(n)**(1./2)]
dAR = map(lambda x : lam0 / (4.0 * real(x)), nAR)
return nAR, dAR
def getWinIP(freq, fbw, theta):
"""
Gets band-averaged IP and polarized absorption for the window
Parameters
==========
freq : float [Hz]
Band center
fbw : float
Fractional Bandwidth
theta : float [rad]
Incident angle
"""
n = 1.5 + .0001j
nARwin, dARwin = ARCoat(n, 2.5)
n_window = [1.0] + nARwin + [n] + nARwin[::-1] + [1.0]
d_window = [Inf] + dARwin + [5.0] + dARwin[::-1] + [Inf]
return (getBandAverage(n_window, d_window, freq, fbw, theta), \
getBandAverageAbs(n_window, d_window, freq, fbw, theta))
def getFilterIP(freq, fbw, theta):
"""
Gets band-averaged IP and polarized absorption for the alumina filter
Parameters
==========
freq : float [Hz]
Band center
fbw : float
Fractional Bandwidth
theta : float [rad]
Incident angle
"""
n = 3.1 + .00008j
nAR, dAR = ARCoat(n, 2.5)
n_AluminaF = [1.0] + nAR + [n] + nAR[::-1] + [1.0]
d_AluminaF = [Inf] + dAR + [2.0] + dAR[::-1] + [Inf]
return (getBandAverage(n_AluminaF, d_AluminaF, freq, fbw, theta), \
getBandAverageAbs(n_AluminaF, d_AluminaF, freq, fbw, theta))
if __name__ == "__main__":
bc = np.array([93.0 * GHz,145. * GHz]) # Band center [Hz]
fbw = np.array([.376, .276]) #Fractional bandwidth
flo = bc * (1 - fbw/2.)
fhi = bc * (1 + fbw/2.)
thetas = map(np.deg2rad, [15./2,20./2,25./2,30./2])
for t in thetas:
wIP1, fIP1 = getWinIP(bc[0], fbw[0], t)[0]*100, getFilterIP(bc[0], fbw[0], t)[0]*100
wIP2, fIP2 = getWinIP(bc[1], fbw[1], t)[0]*100, getFilterIP(bc[1], fbw[1], t)[0]*100
print "%.1f & %.3f & %.3f & %.3f & %.3f & %.3f & %.3f\\\\"%(np.rad2deg(t), wIP1, wIP2, fIP1, fIP2, wIP1 + 2 * fIP2, wIP2 + 2 * fIP2)
# nARwin, dARwin = ARCoat(1.5, 2.5)
# n = 1.5 + .0001j
# n_window = [1.0] + nARwin + [n] + nARwin[::-1] + [1.0]
# d_window = [Inf] + dARwin + [5.0] + dARwin[::-1] + [Inf]
##
## n = 3.1 + .00008j
## nAR, dAR = ARCoat(n, 2.5)
## n_AluminaF = [1.0] + nAR + [n] + nAR[::-1] + [1.0]
## d_AluminaF = [Inf] + dAR + [2.0] + dAR[::-1] + [Inf]
## freqs = np.linspace(flo[0], fhi[1], 100)
## refs = []
## for f in freqs:
## lam = c / f * 1000
## refs += [tmm.coh_tmm('s',n_AluminaF, d_AluminaF, theta,lam)['R']]
##
## plt.plot(freqs, refs)
## plt.show()
##
##
# print getFilterIP(band_center[0], fbw[0], np.deg2rad(15.))
#
# i = 1
# theta = np.deg2rad(15.)
# freqs = np.linspace(flo[i], fhi[i], 100)
#
# s_array = []
# p_array = []
#
# for f in freqs:
# lam = c / f * 1000
#
# s_array += [tmm.coh_tmm('s',n_AluminaF, d_AluminaF, theta,lam)]
# p_array += [tmm.coh_tmm('p',n_AluminaF, d_AluminaF, theta,lam)]
#
# ts = np.array(map(lambda x : x['T'], s_array))
# tp = np.array(map(lambda x : x['T'], p_array))
# rs = np.array(map(lambda x : x['R'], s_array))
# rp = np.array(map(lambda x : x['R'], p_array))
# As = 1 - ts - rs
# Ap = 1 - tp - rp
# tsave = trapz(ts, freqs) / (fhi[i]- flo[i] )
# tpave = trapz(tp, freqs) / (fhi[i]- flo[i] )
# print trapz((ts - tp)/2, freqs) / (fhi[i]- flo[i] )
# rsave = trapz(rs, freqs) / (fhi[i]- flo[i] )
# rpave = trapz(rp, freqs) / (fhi[i]- flo[i] )
# Asave = trapz(As, freqs) / (fhi[i]- flo[i] )
# Apave = trapz(Ap, freqs) / (fhi[i]- flo[i] )
#
# print tsave, rsave, Asave
# print tpave, rpave, Apave
# print .5 * (tsave - tpave), .5 * (rsave - rpave), .5 * (Asave - Apave)
#
#
#
# ips93 = []
# ips145 = []
# ips93Old = []
# ips145Old = []
# freqs = np.linspace(90. * GHz, 160 * GHz, 50)
#
#
#
#
# for f0 in freqs:
# lam0 = c / f0 * 1000.
# nARwin, dARwin = ARCoat(1.5, lam0)
# n_window = [1.0] + nARwin + [1.5] + nARwin[::-1] + [1.0]
# d_window = [Inf] + dARwin + [5.0] + dARwin[::-1] + [Inf]
# theta = np.deg2rad(30.0/2)
# ips93 += [getBandAverage(n_window, d_window, band_center[0], fbw[0], theta)]
# ips145 += [getBandAverage(n_window, d_window, band_center[1], fbw[1], theta)]
#
# nARwin, dARwin = ARCoatOld(1.5, lam0)
# n_window = [1.0] + nARwin + [1.5] + nARwin[::-1] + [1.0]
# d_window = [Inf] + dARwin + [5.0] + dARwin[::-1] + [Inf]
# theta = np.deg2rad(30.0/2)
# ips93Old += [getBandAverage(n_window, d_window, band_center[0], fbw[0], theta)]
# ips145Old += [getBandAverage(n_window, d_window, band_center[1], fbw[1], theta)]
#
#
#
|
gpl-2.0
| 4,520,980,213,326,978,600
| 23.442368
| 140
| 0.520902
| false
| 2.562378
| false
| false
| false
|
M4rtinK/modrana
|
modules/gui_modules/gui_qt5/gui_qt5.py
|
1
|
42517
|
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# A modRana Qt 5 QtQuick 2.0 GUI module
# * it inherits everything in the base GUI module
# * overrides default functions and handling
#----------------------------------------------------------------------------
# Copyright 2013, Martin Kolman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
import os
import re
import pyotherside
try:
from StringIO import StringIO # python 2
except ImportError:
from io import StringIO # python 3
# modRana imports
import math
from modules.gui_modules.base_gui_module import GUIModule
import time
import threading
from core import signal
from core import constants
from core.threads import threadMgr
from core import modrana_log
from core import utils
from core import paths
from core import point
import logging
no_prefix_log = logging.getLogger()
log = logging.getLogger("mod.gui.qt5")
qml_log = logging.getLogger("mod.gui.qt5.qml")
SEARCH_STATUS_PREFIX = "search:status:"
SEARCH_RESULT_PREFIX = "search:result:"
def newlines2brs(text):
""" QML uses <br> instead of \n for linebreak """
return re.sub('\n', '<br>', text)
def getModule(*args, **kwargs):
return QMLGUI(*args, **kwargs)
def point2dict(point):
""" Convert a Point instance to a dict
:param Point point: a Point object instance
:returns dict: a dict representation of the point
"""
return {
"name" : point.name,
"description" : point.description,
"latitude" : point.lat,
"longitude" : point.lon,
"elevation" : point.elevation,
"highlight" : False,
"mDistance" : 0, # will be filled in on QML side
"db_id" : getattr(point, "db_index", None),
"category_id" : getattr(point, "db_category_index", None)
}
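# Illustrative sketch (not part of the original module): the dict shape the
# QML side receives from point2dict(). The namedtuple below is a hypothetical
# stand-in for a core.point.Point instance, not a real modRana point.
def _example_point2dict():
    from collections import namedtuple
    FakePoint = namedtuple("FakePoint", "name description lat lon elevation")
    sample = FakePoint("Sample point", "illustration only", 50.08, 14.42, None)
    return point2dict(sample)
    # -> keys: name, description, latitude, longitude, elevation, highlight,
    #    mDistance, db_id, category_id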
class QMLGUI(GUIModule):
"""A Qt 5 + QtQuick 2 GUI module"""
def __init__(self, *args, **kwargs):
GUIModule.__init__(self, *args, **kwargs)
# some constants
self.msLongPress = 400
self.centeringDisableThreshold = 2048
self.firstTimeSignal = signal.Signal()
size = (800, 480) # initial window size
self._screen_size = None
# positioning related
self._pythonPositioning = False
# we handle notifications by forwarding them to the QML context
self.modrana.notificationTriggered.connect(self._dispatchNotificationCB)
# register exit handler
#pyotherside.atexit(self._shutdown)
# FIXME: for some reason the exit handler is never
# called on Sailfish OS, so we use a onDestruction
# handler on the QML side to trigger shutdown
# window state
self._fullscreen = False
# get screen resolution
# TODO: implement this
#screenWH = self.getScreenWH()
#self.log.debug(" @ screen size: %dx%d" % screenWH)
#if self.highDPI:
# self.log.debug(" @ high DPI")
#else:
# self.log.debug(" @ normal DPI")
# NOTE: what about multi-display devices ? :)
## add image providers
self._imageProviders = {
"icon" : IconImageProvider(self),
"tile" : TileImageProvider(self),
}
# log what version of PyOtherSide we are using
# - we log this without prefix as this shows up early
# during startup, so it looks nicer that way :-)
no_prefix_log.info("using PyOtherSide %s", pyotherside.version)
## register the actual callback, that
## will call the appropriate provider base on
## image id prefix
pyotherside.set_image_provider(self._selectImageProviderCB)
# initialize theming
self._theme = Theme(self)
## make constants accessible
#self.constants = self.getConstants()
#rc.setContextProperty("C", self.constants)
## connect to the close event
#self.window.closeEvent = self._qtWindowClosed
##self.window.show()
self._notificationQueue = []
# provides easy access to modRana modules from QML
self.modules = Modules(self)
# search functionality for the QML context
self.search = Search(self)
# POI handling for the QML context
self.POI = POI(self)
# make the log manager easily accessible
self.log_manager = modrana_log.log_manager
# log for log messages from the QML context
self.qml_log = qml_log
# queue a notification to QML context that
# a Python logger is available
pyotherside.send("loggerAvailable")
# tracklogs
self.tracklogs = Tracklogs(self)
#routing
self.routing = Routing(self)
# turn by turn navigation
self.navigation = Navigation(self)
def firstTime(self):
# trigger the first time signal
self.firstTimeSignal()
self.modules.location.positionUpdate.connect(self._pythonPositionUpdateCB)
def _shutdown(self):
"""Called by PyOtherSide once the QML side is shutdown.
"""
self.log.info("Qt 5 GUI module shutting down")
self.modrana.shutdown()
def getIDString(self):
return "Qt5"
def needsLocalhostTileserver(self):
"""
the QML GUI needs the localhost tileserver
for efficient and responsive tile loading
"""
return False
def isFullscreen(self):
return self._fullscreen
def toggleFullscreen(self):
# TODO: implement this
pass
def setFullscreen(self, value):
pass
def setCDDragThreshold(self, threshold):
"""set the threshold which needs to be reached to disable centering while dragging
basically, larger threshold = longer drag is needed to disable centering
default value = 2048
"""
self.centeringDisableThreshold = threshold
def hasNotificationSupport(self):
return True
def _dispatchNotificationCB(self, text, msTimeout=5000, icon=""):
"""Let the QML context know that it should show a notification
:param str text: text of the notification message
:param int msTimeout: how long to show the notification in ms
"""
self.log.debug("notify:\n message: %s, timeout: %d" % (text, msTimeout))
pyotherside.send("pythonNotify", {
"message" : newlines2brs(text), # QML uses <br> in place of \n
"timeout" : msTimeout
})
def openUrl(self, url):
# TODO: implement this
pass
def _getTileserverPort(self):
m = self.m.get("tileserver", None)
if m:
return m.getServerPort()
else:
return None
def getScreenWH(self):
return self._screen_size
def getModRanaVersion(self):
"""
report current modRana version or None if version info is not available
"""
version = self.modrana.paths.version_string
if version is None:
return "unknown"
else:
return version
def setPosition(self, posDict):
if self._pythonPositioning:
# ignore the setPosition call if Python-side positioning
# is used as the Python side already has fresh position data
return
lat, lon = float(posDict["latitude"]), float(posDict["longitude"])
elevation = float(posDict["elevation"])
metersPerSecSpeed = float(posDict["speedMPS"]) # m/s
# report that we have 3D fix
# (looks like we can't currently reliably discern between 2D
# and 3D fix on the Jolla, might be good to check what other
# Sailfish OS running devices report)
self.set("fix", 3)
self.set("pos", (lat, lon))
# check if elevation is valid
if not math.isnan(elevation):
self.set("elevation", elevation)
else:
self.set("elevation", None)
# check if speed is valid
if not math.isnan(metersPerSecSpeed):
self.set("speed", metersPerSecSpeed*3.6)
self.set("metersPerSecSpeed", metersPerSecSpeed)
else:
self.set("speed", None)
self.set("metersPerSecSpeed", None)
# update done
self.set('locationUpdated', time.time())
# TODO: move part of this to the location module ?
def _pythonPositionUpdateCB(self, fix):
self._pythonPositioning = True
if fix.position:
(lat, lon) = fix.position
else:
(lat, lon) = None, None
# magnetic variation might sometimes not be set
magnetic_variation = 0.0
magnetic_variation_valid = False
if fix.magnetic_variation is not None:
magnetic_variation = fix.magnetic_variation
magnetic_variation_valid = True
pyotherside.send("pythonPositionUpdate", {
"latitude" : lat,
"longitude" : lon,
"altitude" : fix.altitude,
"speed" : fix.speed,
"verticalSpeed" : fix.climb,
"horizontalAccuracy" : fix.horizontal_accuracy,
"verticalAccuracy" : fix.vertical_accuracy,
"direction" : fix.bearing,
"magneticVariation" : magnetic_variation,
"magneticVariationValid" : magnetic_variation_valid,
"timestamp" : fix.timestamp,
"valid" : bool(fix.position)
})
def _selectImageProviderCB(self, imageId, requestedSize):
originalImageId = imageId
providerId = ""
#self.log.debug("SELECT IMAGE PROVIDER")
#self.log.debug(imageId)
#self.log.debug(imageId.split("/", 1))
try:
# split out the provider id
providerId, imageId = imageId.split("/", 1)
# get the provider and call its getImage()
return self._imageProviders[providerId].getImage(imageId, requestedSize)
except ValueError: # provider id missing or image ID overall wrong
self.log.error("provider ID missing: %s", originalImageId)
except AttributeError: # missing provider (we are calling methods of None ;) )
if providerId:
self.log.error("image provider for this ID is missing: %s", providerId)
else:
self.log.error("image provider broken, image id: %s", originalImageId)
except Exception: # catch and report the rest
self.log.exception("image loading failed, imageId: %s", originalImageId)
def _tileId2lzxy(self, tileId):
"""Convert tile id string to the "standard" lzxy tuple
:param str tileId: map instance name/layer id/z/x/y
:returns: lzxy tuple
:rtype: tuple
"""
split = tileId.split("/")
# pinchMapId = split[0]
layerId = split[1]
z = int(split[2])
x = int(split[3])
y = int(split[4])
# TODO: local id:layer cache ?
layer = self.modules.mapLayers.getLayerById(layerId)
return layer, z, x, y
def areTilesAvailable(self, tile_ids):
"""Report if tiles are available & request download for those that are not.
:param list tile_ids: list of tile ids to check
:return: a dictionary of tile states, True = available, False = will be downloaded
:rtype: dict
"""
available_tiles = {}
for tile_id in tile_ids:
available_tiles[tile_id] = self.isTileAvailable(tile_id)
return available_tiles
def isTileAvailable(self, tileId):
"""Check if tile is available and add download request if not.
NOTE: If automatic tile downloads are disabled tile download
request will not be queued.
:param str tileId: tile identifier
:return: True if the tile is locally available, False if not
:rtype: bool
"""
lzxy = self._tileId2lzxy(tileId)
if self.modules.mapTiles.tileInStorage(lzxy):
return True
else:
self._addTileDownloadRequest(lzxy, tileId)
return False
def _addTileDownloadRequest(self, lzxy, tileId):
"""Add an asynchronous download request, the tile will be
notified once the download is finished or fails
:param string tileId: unique tile id
"""
try:
self.modules.mapTiles.addTileDownloadRequest(lzxy, tileId)
except Exception:
self.log.exception("adding tile download request failed")
def _getStartupValues(self):
""" Return a dict of values needed by the Qt 5 GUI right after startup.
By grouping the requested values in a single dict we reduce the number
of Python <-> QML roundtrips and also make it possible to more easily
get these values asynchronously (values arrive all at the same time,
not in random order at random time).
:returns: a dict gathering the requested values
:rtype dict:
"""
values = {
"modRanaVersion" : self.getModRanaVersion(),
"constants" : self.getConstants(),
"show_quit_button": self.showQuitButton(),
"fullscreen_only": self.modrana.dmod.fullscreen_only,
"should_start_in_fullscreen": self.shouldStartInFullscreen(),
"needs_back_button": self.modrana.dmod.needs_back_button,
"needs_page_background": self.modrana.dmod.needs_page_background,
"lastKnownPos" : self.get("pos", None),
"gpsEnabled" : self.get("GPSEnabled", True),
"posFromFile" : self.get("posFromFile", None),
"nmeaFilePath" : self.get("NMEAFilePath", None),
"layerTree" : self.modules.mapLayers.getLayerTree(),
"dictOfLayerDicts" : self.modules.mapLayers.getDictOfLayerDicts(),
"themesFolderPath" : os.path.abspath(self.modrana.paths.themes_folder_path),
"sailfish" : self.dmod.device_id == "jolla",
"device_type" : self.modrana.dmod.device_type,
"highDPI" : self.highDPI,
"defaultTileStorageType" : self.modrana.dmod.defaultTileStorageType,
"aboutModrana" : self._get_about_info()
}
return values
def _set_screen_size(self, screen_size):
"""A method called by QML to report current screen size in pixels.
:param screen_size: screen width and height in pixels
:type screen_size: a tuple of integers
"""
self._screen_size = screen_size
def _get_about_info(self):
info = self.modules.info
return {
"email_address" : info.email_address,
"website_url" : info.website_url,
"source_repository_url" : info.source_repository_url,
"discussion_url" : info.main_discussion[0],
"translation_url" : info.translation_url,
"pay_pal_url" : info.pay_pal_url,
"flattr_url" : info.flattr_url,
"gratipay_url" : info.gratipay_url,
"bitcoin_address" : info.bitcoin_address
}
class Modules(object):
"""A class that provides access to modRana modules from the QML context,
using the __getattr__ method so that QML can access all modules dynamically
with normal dot notation
"""
def __init__(self, gui):
self._info = None
self._stats = None
self._mapLayers = None
self._storeTiles = None
self.gui = gui
def __getattr__(self, moduleName):
return self.gui.m.get(moduleName, None)
class POI(object):
"""An easy to use POI interface for the QML context"""
def __init__(self, gui):
self.gui = gui
def list_used_categories(self):
db = self.gui.modules.storePOI.db
cat_list = []
for category in db.list_used_categories():
category_id = category[2]
poi_count = len(db.get_all_poi_from_category(category_id)) # do this more efficiently
cat_list.append({
"name" : category[0],
"description" : category[1],
"poi_count" : poi_count,
"category_id" : category_id
})
return cat_list
def _db_changed(self):
"""Notify QML that the POI database has been changed.
This can be used to reload various caches and views.
"""
pyotherside.send("poiDatabaseChanged")
def _new_poi_added(self, new_poi_dict):
"""Notify QML that a new POI has been added"""
pyotherside.send("newPoiAddedToDatabase", new_poi_dict)
def store_poi(self, point_dict):
success = False
db = self.gui.modules.storePOI.db
name = point_dict.get("name")
description = point_dict.get("description", "")
lat = point_dict.get("lat")
lon = point_dict.get("lon")
category_id = point_dict.get("category_id")
# make sure lat & lon is a floating point number
try:
lat = float(lat)
lon = float(lon)
except Exception:
self.gui.log.exception("can't save POI: lat or lon not float")
# default to "Other" if no category is provided
if category_id is None:
category_id = 11 # TODO: this should be dynamically queried from the database
# sanity check
if name and lon is not None and lat is not None:
poi = point.POI(name=name,
description=description,
lat=lat,
lon=lon,
db_cat_id=category_id)
self.gui.log.info("saving POI: %s", poi)
poi_db_index = db.store_poi(poi)
self.gui.log.info("POI saved")
success = True
# notify QML a new POI was added
new_poi_dict = point2dict(point.POI(name,
description,
lat,
lon,
category_id, poi_db_index))
self._new_poi_added(new_poi_dict)
else:
self.gui.log.error("cant's save poi, missing name or coordinates: %s", point_dict)
if success:
self._db_changed()
return success
def get_all_poi_from_category(self, category_id):
db = self.gui.modules.storePOI.db
poi_list = []
for poi_tuple in db.get_all_poi_from_category(category_id):
# TODO: do this already in poi_db
(name, desc, lat, lon, poi_id) = poi_tuple
poi_dict = point2dict(point.POI(name, desc, lat, lon, category_id, poi_id))
poi_list.append(poi_dict)
return poi_list
def delete_poi(self, poi_db_index):
log.debug("deleting POI with db index %s", poi_db_index)
db = self.gui.modules.storePOI.db
db.delete_poi(poi_db_index)
self._db_changed()
class Search(object):
"""An easy to use search interface for the QML context"""
def __init__(self, gui):
self.gui = gui
self._threadsInProgress = {}
# register the thread status changed callback
threadMgr.threadStatusChanged.connect(self._threadStatusCB)
def search(self, searchId, query, searchPoint=None):
"""Trigger an asynchronous search (specified by search id)
for the given term
:param str query: search query
"""
online = self.gui.m.get("onlineServices", None)
if online:
# construct result handling callback
callback = lambda x : self._searchCB(searchId, x)
# get search function corresponding to the search id
searchFunction = self._getSearchFunction(searchId)
# start the search and remember the search thread id
# so we can use it to track search progress
# (there might be more searches in progress so we
# need to know the unique search thread id)
if searchId == "local" and searchPoint:
pointInstance = point.Point(searchPoint.latitude, searchPoint.longitude)
threadId = searchFunction(query, callback, around=pointInstance)
else:
threadId = searchFunction(query, callback)
self._threadsInProgress[threadId] = searchId
return threadId
def _searchCB(self, searchId, results):
"""Handle address search results
:param list results: address search results
"""
resultList = []
for result in results:
resultList.append(point2dict(result))
resultId = SEARCH_RESULT_PREFIX + searchId
pyotherside.send(resultId, resultList)
thisThread = threading.currentThread()
# remove the finished thread from tracking
if thisThread.name in self._threadsInProgress:
del self._threadsInProgress[thisThread.name]
def cancelSearch(self, threadId):
"""Cancel the given asynchronous search thread"""
log.info("canceling search thread: %s", threadId)
threadMgr.cancel_thread(threadId)
if threadId in self._threadsInProgress:
del self._threadsInProgress[threadId]
def _threadStatusCB(self, threadName, threadStatus):
# check if the event corresponds to some of the
# in-progress search threads
recipient = self._threadsInProgress.get(threadName)
if recipient:
statusId = SEARCH_STATUS_PREFIX + recipient
pyotherside.send(statusId, threadStatus)
def _getSearchFunction(self, searchId):
"""Return the search function object for the given searchId"""
online = self.gui.m.get("onlineServices", None)
if online:
if searchId == "address":
return online.geocodeAsync
elif searchId == "wikipedia":
return online.wikipediaSearchAsync
elif searchId == "local":
return online.localSearchAsync
else:
log.error("search function for id: %s not found", searchId)
return None
else:
log.error("onlineServices module not found")
class ImageProvider(object):
"""PyOtherSide image provider base class"""
def __init__(self, gui):
self.gui = gui
def getImage(self, imageId, requestedSize):
pass
class IconImageProvider(ImageProvider):
"""the IconImageProvider class provides icon images to the QML layer as
QML does not seem to handle .. in the url very well"""
def __init__(self, gui):
ImageProvider.__init__(self, gui)
def getImage(self, imageId, requestedSize):
#log.debug("ICON!")
#log.debug(imageId)
try:
#TODO: theme name caching ?
themeFolder = self.gui.modrana.paths.themes_folder_path
# fullIconPath = os.path.join(themeFolder, imageId)
# the path is constructed like this in QML
# so we can safely just split it like this
splitPath = imageId.split("/")
# remove any Ambiance specific garbage appended by Silica
splitPath[-1] = splitPath[-1].rsplit("?")[0]
fullIconPath = os.path.join(themeFolder, *splitPath)
extension = os.path.splitext(fullIconPath)[1]
# set correct data format based on the extension
if extension.lower() == ".svg":
data_format = pyotherside.format_svg_data
else:
data_format = pyotherside.format_data
if not utils.internal_isfile(fullIconPath):
if splitPath[0] == constants.DEFAULT_THEME_ID:
# already on default theme and icon path does not exist
log.error("Icon not found in default theme:")
log.error(fullIconPath)
return None
else: # try to get the icon from default theme
splitPath[0] = constants.DEFAULT_THEME_ID
fullIconPath = os.path.join(themeFolder, *splitPath)
if not utils.internal_isfile(fullIconPath):
# icon not found even in the default theme
log.error("Icon not found even in default theme:")
log.error(fullIconPath)
return None
# We only set height or else SVG icons would be squished if a square icon
# has been requested but the SVG icon is not square. If just height is set,
# the clever SVG handling code (which I wrote ;-) ) will handle this correctly. :)
return utils.internal_get_file_contents(fullIconPath), (-1, requestedSize[1]), data_format
except Exception:
log.exception("icon image provider: loading icon failed, id:\n%s" % imageId)
class TileImageProvider(ImageProvider):
"""
the TileImageProvider class provides tile images to the QML map element
"""
def __init__(self, gui):
ImageProvider.__init__(self, gui)
self.gui = gui
self.gui.firstTimeSignal.connect(self._firstTimeCB)
self._tileNotFoundImage = bytearray([0, 255, 255, 255])
def _firstTimeCB(self):
# connect to the tile downloaded callback so that we can notify
# the QML context that a tile has been downloaded and should be
# shown on the screen
# NOTE: we need to wait for the firstTime signal as at GUI module init
# the other modules (other than the device module) are not yet initialized
self.gui.modules.mapTiles.tileDownloaded.connect(self._tileDownloadedCB)
def _tileDownloadedCB(self, error, lzxy, tag):
"""Notify the QML context that a tile has been downloaded"""
pinchMapId = tag.split("/")[0]
#log.debug("SENDING: %s %s" % ("tileDownloaded:%s" % pinchMapId, tag))
resoundingSuccess = error == constants.TILE_DOWNLOAD_SUCCESS
fatalError = error == constants.TILE_DOWNLOAD_ERROR
pyotherside.send("tileDownloaded:%s" % pinchMapId, tag, resoundingSuccess, fatalError)
def getImage(self, imageId, requestedSize):
"""
the tile info should look like this:
layerID/zl/x/y
"""
#log.debug("TILE REQUESTED %s" % imageId)
#log.debug(requestedSize)
try:
# split the string provided by QML
split = imageId.split("/")
pinchMapId = split[0]
layerId = split[1]
z = int(split[2])
x = int(split[3])
y = int(split[4])
# TODO: local id:layer cache ?
layer = self.gui.modules.mapLayers.getLayerById(layerId)
# construct the tag
#tag = (pinchMapId, layerId, z, x, y)
#tag = (pinchMapId, layerId, z, x, y)
# get the tile from the tile module
tileData = self.gui.modules.mapTiles.getTile((layer, z, x, y),
asynchronous=True, tag=imageId,
download=False)
imageSize = (256,256)
if tileData is None:
# The tile was not found locally
# * in persistent storage (files/sqlite db)
# * in the tile cache in memory
# An asynchronous tile download request has been added
# automatically, so we just now need to notify the
# QtQuick GUI that it should wait for the download
# completed signal.
#
# We notify the GUI by returning a 1x1 image.
return self._tileNotFoundImage, (1,1), pyotherside.format_argb32
#log.debug("%s NOT FOUND" % imageId)
#log.debug("RETURNING STUFF %d %s" % (imageSize[0], imageId))
return bytearray(tileData), imageSize, pyotherside.format_data
except Exception:
log.error("tile image provider: loading tile failed")
log.error(imageId)
log.error(requestedSize)
log.exception("tile image provider exception")
class MapTiles(object):
def __init__(self, gui):
self.gui = gui
@property
def tileserverPort(self):
port = self.gui._getTileserverPort()
if port:
return port
else: # None,0 == 0 in QML
return 0
def loadTile(self, layerId, z, x, y):
"""
load a given tile from storage and/or from the network
True - tile already in storage or in memory
False - tile download in progress, retry in a while
"""
# log.debug(layerId, z, x, y)
if self.gui.mapTiles.tileInMemory(layerId, z, x, y):
# log.debug("available in memory")
return True
elif self.gui.mapTiles.tileInStorage(layerId, z, x, y):
# log.debug("available in storage")
return True
else: # not in memory or storage
# add a tile download request
self.gui.mapTiles.addTileDownloadRequest(layerId, z, x, y)
# log.debug("downloading, try later")
return False
class _Search(object):
_addressSignal = signal.Signal()
changed = signal.Signal()
test = signal.Signal()
def __init__(self, gui):
self.gui = gui
self._addressSearchResults = None
self._addressSearchStatus = "Searching..."
self._addressSearchInProgress = False
self._addressSearchThreadName = None
self._localSearchResults = None
self._wikipediaSearchResults = None
self._routeSearchResults = None
self._POIDBSearchResults = None
# why are we keeping our own dictionary of wrapped
# objects and not just returning a newly wrapped object on demand ?
# -> because PySide (1.1.1) segfaults if we don't hold any reference
# on the object returned :)
# register the thread status changed callback
threadMgr.threadStatusChanged.connect(self._threadStatusCB)
def _threadStatusCB(self, threadName, threadStatus):
if threadName == self._addressSearchThreadName:
self._addressSearchStatus = threadStatus
self._addressSignal()
def address(self, address):
"""Trigger an asynchronous address search for the given term
:param address: address search query
:type address: str
"""
online = self.gui.m.get("onlineServices", None)
if online:
self._addressSearchThreadName = online.geocodeAsync(
address, self._addressSearchCB
)
self._addressSearchInProgress = True
self._addressSignal()
def addressCancel(self):
"""Cancel the asynchronous address search"""
threadMgr.cancel_thread(self._addressSearchThreadName)
self._addressSearchInProgress = False
self._addressSearchStatus = "Searching..."
self._addressSignal()
def _addressSearchCB(self, results):
"""Replace old address search results (if any) with
new (wrapped) results
:param results: address search results
:type results: list
"""
#self.gui._addressSearchListModel.set_objects(
# wrapList(results, wrappers.PointWrapper)
#)
self._addressSearchInProgress = False
self._addressSignal.emit()
class ModRana(object):
"""
core modRana functionality
"""
def __init__(self, modrana, gui):
self.modrana = modrana
self.gui = gui
self.modrana.watch("mode", self._modeChangedCB)
self.modrana.watch("theme", self._themeChangedCB)
self._theme = Theme(gui)
# mode
def _getMode(self):
return self.modrana.get('mode', "car")
def _setMode(self, mode):
self.modrana.set('mode', mode)
modeChanged = signal.Signal()
def _modeChangedCB(self, *args):
"""notify when the mode key changes in options"""
self.modeChanged()
class Theme(object):
"""modRana theme handling"""
def __init__(self, gui):
self.gui = gui
# connect to the first time signal
self.gui.firstTimeSignal.connect(self._firstTimeCB)
self.themeModule = None
self._themeDict = {}
self.colors = None
self.modrana = self.gui.modrana
self.themeChanged.connect(self._notifyQMLCB)
themeChanged = signal.Signal()
def _firstTimeCB(self):
# we need the theme module
self.themeModule = self.gui.m.get('theme')
theme = self.themeModule.theme
# reload the theme dict so that
# the dict is up to date and
# then trigger the changed signal
# and give it the current theme dict
self.themeChanged(self._reloadTheme(theme))
# connect to the core theme-modules theme-changed signal
self.themeModule.themeChanged.connect(self._themeChangedCB)
def _themeChangedCB(self, newTheme):
""" Callback from the core theme module
- reload theme and trigger our own themeChanged signal
:param newTheme: new theme from the core theme module
:type newTheme: Theme
"""
self.themeChanged(self._reloadTheme(newTheme))
def _notifyQMLCB(self, newTheme):
""" Notify the QML context that the modRana theme changed
:param newTheme: the new theme
:type newTheme: dict
"""
pyotherside.send("themeChanged", newTheme)
@property
def themeId(self):
return self._themeDict.get("id")
@themeId.setter
def themeId(self, themeId):
self.modrana.set('theme', themeId)
@property
def theme(self):
return self._themeDict
def _reloadTheme(self, theme):
"""Recreate the theme dict from the new theme object
:param theme: new modRana Theme object instance
:type theme: Theme
"""
themeDict = {
"id" : theme.id,
"name" : theme.name,
"color" : {
"main_fill" : theme.getColor("main_fill", "#92aaf3"),
"main_highlight_fill" : theme.getColor("main_highlight_fill", "#f5f5f5"),
"icon_grid_toggled" : theme.getColor("icon_grid_toggled", "#c6d1f3"),
"icon_button_normal" : theme.getColor("icon_button_normal", "#c6d1f3"),
"icon_button_toggled" : theme.getColor("icon_button_toggled", "#3c60fa"),
"icon_button_text" : theme.getColor("icon_button_text", "black"),
"page_background" : theme.getColor("page_background", "black"),
"list_view_background" : theme.getColor("list_view_background", "#d2d2d2d"),
"page_header_text" : theme.getColor("page_header_text", "black"),
}
}
self._themeDict = themeDict
return themeDict
class Tracklogs(object):
"""Some tracklog specific functionality"""
SAILFISH_TRACKLOGS_SYMLINK_NAME = "modrana_tracklogs"
SAILFISH_SYMLINK_PATH = os.path.join(paths.get_home_path(), "Documents", SAILFISH_TRACKLOGS_SYMLINK_NAME)
def __init__(self, gui):
self.gui = gui
self.gui.firstTimeSignal.connect(self._firstTimeCB)
self._sendUpdates = True
def _firstTimeCB(self):
# connect to the tracklog update signal, so that we can send
# track logging state updates to the GUI
self.gui.modules.tracklog.tracklogUpdated.connect(self._sendUpdateCB)
def _sendUpdateCB(self):
"""Tracklog has been updated, send the updated info dict to GUI"""
if self._sendUpdates:
pyotherside.send("tracklogUpdated", self.gui.modules.tracklog.getStatusDict())
def setSendUpdates(self, value):
"""Set if tracklog updates should be sent to the GUI layer or not.
This is used to disable updates when the track recording page is not visible.
"""
self._sendUpdates = value
if value:
self.gui.log.debug("tracklog: enabling logging status updates")
else:
self.gui.log.debug("tracklog: disabling logging status updates")
def sailfishSymlinkExists(self):
"""Report if the easy access symlink on Sailfish OS for tracklogs exists
:returns: True if the symlink exists, False if not
:rtype: bool
"""
return os.path.islink(self.SAILFISH_SYMLINK_PATH)
def createSailfishSymlink(self):
"""Create symlink from the actual tracklogs folder in the XDG path
to ~/Documents for easier access to the tracklogs by the users
"""
self.gui.log.info("tracklogs: creating sailfish tracklogs symlink")
if self.sailfishSymlinkExists():
self.gui.log.warning("tracklogs: the Sailfish tracklogs symlink already exists")
else:
try:
os.symlink(self.gui.modrana.paths.tracklog_folder_path, self.SAILFISH_SYMLINK_PATH)
self.gui.log.info("tracklogs: sailfish tracklogs symlink created")
except Exception:
self.gui.log.exception("tracklogs: sailfish tracklogs symlink creation failed")
def removeSailfishSymlink(self):
"""Remove the easy-access Sailfish OS symlink"""
self.gui.log.info("tracklogs: removing sailfish tracklogs symlink")
if not self.sailfishSymlinkExists():
self.gui.log.warning("tracklogs: the Sailfish tracklogs symlink does not exist")
else:
try:
os.remove(self.SAILFISH_SYMLINK_PATH)
self.gui.log.info("tracklogs: sailfish tracklogs symlink removed")
except Exception:
self.gui.log.exception("tracklogs: sailfish tracklogs symlink removed")
class Routing(object):
"""Qt 5 GUI specific stuff for routing support"""
def __init__(self, gui):
self.gui = gui
self.gui.firstTimeSignal.connect(self._first_time_cb)
self._sendUpdates = True
def request_route(self, route_request):
waypoints = []
self.gui.log.debug("REQUEST:")
self.gui.log.debug(route_request)
for waypoint_dict in route_request["waypoints"]:
waypoint = point.Waypoint(lat=waypoint_dict["latitude"],
lon=waypoint_dict["longitude"],
heading=waypoint_dict["heading"])
waypoints.append(waypoint)
self.gui.modules.route.waypoints_route(waypoints)
def _first_time_cb(self):
self.gui.modules.route.routing_done.connect(self._routing_done_cb)
def _routing_done_cb(self, result):
if result and result.returnCode == constants.ROUTING_SUCCESS:
route_points = result.route.points_lle
message_points = result.route.message_points
message_points_llemi = []
for mp in message_points:
message_points_llemi.append(mp.llemi)
# also add a point for the route end
if route_points:
lastPoint = route_points[-1]
lastPointMessage = "You <b>should</b> be near the destination."
message_points_llemi.append((lastPoint[0], lastPoint[1],
lastPoint[2], lastPointMessage))
# TODO: this should really be done in the route module itself somehow
self.gui.modules.route.process_and_save_directions(result.route)
self.gui.log.debug("routing successful")
pyotherside.send("routeReceived",
{"points" : route_points,
"messagePoints" : message_points_llemi}
)
else:
error_message = constants.ROUTING_FAILURE_MESSAGES.get(result.returnCode, "Routing failed.")
self.gui.log.debug(error_message)
class Navigation(object):
"""Qt 5 GUI specific stuff for turn by turn navigation support"""
def __init__(self, gui):
self.gui = gui
self.gui.firstTimeSignal.connect(self._firstTimeCB)
self.tbt = None
def _firstTimeCB(self):
# the module machinery is not yet really setup at init time,
# so we need to do stuff involving modRana modules only
# at the first time signal
self.tbt = self.gui.modules.turnByTurn
# connect to signals
self.tbt.navigation_started.connect(self._navigation_started_cb)
self.tbt.navigation_stopped.connect(self._navigation_stopped_cb)
self.tbt.destination_reached.connect(self._destination_reached_cb)
self.tbt.rerouting_triggered.connect(self._rerouting_triggered_cb)
self.tbt.current_step_changed.connect(self._current_step_changed_cb)
def _navigation_started_cb(self):
pyotherside.send("navigationStarted")
def _navigation_stopped_cb(self):
pyotherside.send("navigationStopped")
def _destination_reached_cb(self):
pyotherside.send("navigationDestionationReached")
def _rerouting_triggered_cb(self):
pyotherside.send("navigationReroutingTriggered")
def _current_step_changed_cb(self, step_point):
step_dict = {
"message" : step_point.description,
"latitude" : step_point.lat,
"longitude" : step_point.lon,
"icon" : step_point.icon,
}
pyotherside.send("navigationCurrentStepChanged", step_dict)
def start(self):
self.tbt.start_tbt()
def stop(self):
self.tbt.stop_tbt()
|
gpl-3.0
| 6,526,115,789,858,086,000
| 36.592396
| 109
| 0.600936
| false
| 4.122261
| false
| false
| false
|
SIU-CS/J-JAM-production
|
mhapsite/mhap/forms.py
|
1
|
3810
|
"""
Contains imports of forms from django and captcha and our custom models.
Has logic for form validation as well.
"""
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.forms.extras.widgets import SelectDateWidget
from captcha.fields import CaptchaField
from .models import Post, Profile
BIRTH_YEAR_CHOICES = tuple(str(date) for date in range(1980, 2000))
#print BIRTH_YEAR_CHOICES
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = [
"title",
"content",
"secret"
]
#http://stackoverflow.com/questions/28458770/how-can-create-a-model-form-in-django-with-a-one-to-one-relation-with-another-mo
#http://stackoverflow.com/questions/11923317/creating-django-forms
class PasswordForm(forms.Form):
"""
Extends forms.Form and is essentially a password form we use for inputting password twice
"""
password1 = forms.CharField(label=("Password"), widget=forms.PasswordInput)
password2 = forms.CharField(label=("Password (again)"), widget=forms.PasswordInput)
def clean(self):
print "IN CLEAN"
print self.errors
password_two = self.cleaned_data.get('password2')
password_one = self.cleaned_data.get('password1')
#print password_one,"PASSWORD !"
#print password_one,password_two
if not password_two:
raise forms.ValidationError("Must confirm your password")
if password_one != password_two:
raise forms.ValidationError("Passwords dont match")
valid = self.user.check_password(self.cleaned_data['password1'])
print self.user
if not valid:
raise forms.ValidationError("Password Incorrect")
print self.errors
return valid
def __init__(self, user=None, *args, **kwargs):
self.user = user
print self.user, "IN INIT"
super(PasswordForm, self).__init__(*args, **kwargs)
class ChatForm(forms.Form):
"""
Extends forms.Form and is essentially a form for inputting chat messages
"""
chat = forms.CharField(label=("Input"))
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ("birth_date",)
widgets = {
'birth_date':SelectDateWidget(years=BIRTH_YEAR_CHOICES)
}
#http://stackoverflow.com/questions/23692533/django-datefield-object-has-no-attribute-is-hidden
class UserForm(forms.ModelForm):
#password1 = forms.PasswordInput()
#password2=forms.PasswordInput()
#http://stackoverflow.com/questions/4939737/cant-add-field-to-modelform-at-init?rq=1
class Meta:
model = User
fields = ('username',)
class SignUpForm(UserCreationForm):
print "IN SIGNUP"
email = forms.EmailField(max_length=254, help_text='Required. Provide a valid email address.')
birth_date = forms.DateField(help_text='Required. Format: YYYY-MM-DD')
class Meta:
print "IN META"
model = User
fields = ('username', 'email', 'birth_date','password1', 'password2', )
#http://stackoverflow.com/questions/1160030/how-to-make-email-field-unique-in-model-user-from-contrib-auth-in-django
def clean_email(self):
print "IN UNIQUE EMAIL"
email = self.cleaned_data.get('email')
print email
username = self.cleaned_data.get('username')
print username
print User.objects.filter(email=email).exclude(username=username)
if email and User.objects.filter(email=email).exclude(username=username).count():
raise forms.ValidationError(u'Email addresses must be unique.')
return email
class AxesCaptchaForm(forms.Form):
captcha = CaptchaField()
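# Illustrative usage sketch (not part of the original module). It assumes a
# configured Django project and an existing User instance; the passwords are
# hypothetical placeholder values.
def _example_confirm_password(user):
    """Bind PasswordForm to POST-style data and validate it against `user`."""
    form = PasswordForm(user, data={"password1": "hunter2", "password2": "hunter2"})
    # is_valid() runs clean(), which checks that both fields match and that the
    # value really is the user's current password.
    return form.is_valid()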
|
gpl-3.0
| -1,176,780,099,395,129,900
| 31.02521
| 125
| 0.667979
| false
| 3.891726
| false
| false
| false
|
curiouserrandy/Python-Utils
|
dataflow.py
|
1
|
11123
|
import operator
### Setting up a quick dataflow structure model for handling
### Originally inspired by handling transformations when importing mail
### messages for the mail database project (see Projects/MailSys) but
### useful enough that I wanted it global.
### XXX: There probably are improvements to make in the base abstraction
### around handling multiple input and output links. Oh, well.
### XXX: Some logging in the base abstraction would be good. For the
### current implementation, central logging showing input and output
### along each link would be grand. I think this requires inputs to
### pass through the base class and call up, which is fine.
### Naming conventions for internally/externally called functions and
### should/shouldn't be overridden:
### If they're external I want them to be easy to call, so I don't want
### to distinguish override/non-override in the name. I'll follow that
### pattern internally as well. Internal functions will have an _ prepended.
### XXX: Should have classes export schema and have that schema checked on
### linkage.
### XXX: You may want tagged inputs and outputs. Heck, you may want
### both tagged and numbered outputs; numbered for multiple items of
### the same type, and tagged for different categories.
### XXX: Specify interface more completely (specifically to superclasses, and
### to external functions).
### XXX: Might want to think about operator overloading to link DFNs
### (possibly mimic building a list; stream DFN container? Any good
### syntactic sugar for splits?)
### XXX: I need a clearer distinction in states between "figuring out
### linkages" and "flowing". I need to know whether I can trust
### the linkage info.
### XXX: Why am I assuming a single input before inputs get attached?
class DataflowNode(object):
"""Base class for node in a dataflow network. Takes an input record,
does some type of transformation on it, and outputs some other record.
Default action is just to pass things through.
Note that only input, _localEos, and _validate_link are intended to be overridden by
descendants."""
def __init__(self):
self.outputFunctions = []
self.outputEos = []
# Default to a single input. If there are more from other DFNs,
# the array will expand automatically, and it currently doesn't
# make sense to have no inputs for a DFN.
self.upstreamInfo = [] # Tuples of obj, output#
self.eosSeen = [False,]
self.shutdown = False
# input and eos are both called by both user and internal links
def input(self, record, inputLink=0):
"Default behavior is assertion exception; descendants should override."
assert False, "DataflowNode class not meant to be used directly."
def eos(self, inputLink=0):
self.eosSeen[inputLink] = True
if reduce(operator.and_, filter(lambda x: operator.is_not(x, None),
self.eosSeen)):
self._localEos()
for f in self.outputEos:
if f: f()
def setupInput(self, inputLink):
"""Setup a specific external input for multi-external input
nodes."""
assert inputLink > 1
self.eosSeen += \
[None,] * max(0,inputLink - len(self.eosSeen) + 1)
self.eosSeen[inputLink] = False
def _firstOpenOutput(self):
"""Used by subclasses to do auto-linking of multiple outputs."""
for i in range(len(self.outputFunctions)):
if self.outputFunctions[i] is None:
return i
return len(self.outputFunctions)
def _validate_link(self, linknum, input_p):
"""Should be overridden if only some links are valid."""
return True
def _localEos(self):
"""Internal function called when eos has been seen on all inputs.
Descendants may override to get this notification."""
pass
def _output(self, record, links=None):
"""Internal method for outputing a record conditional on output func.
links is a list of outputs to output on; defaults to the specical value
None, meaning all of them."""
if links is None: links = range(len(self.outputFunctions))
for l in links:
if self.outputFunctions[l]:
self.outputFunctions[l](record)
def _shutdown(self):
"""Inform upstream nodes that we're going away and they shouldn't
bother us anymore. Note that this is independent from sending
eos downstream."""
self.shutdown = True
for usn in self.upstreamInfo:
(node, port) = usn
node._breakPipe(port)
def _breakPipe(self, port):
self.outputFunctions[port] = None
self.outputEos[port] = None
if not filter(None, self.outputFunctions):
# We're done; we've got no more customers
self._shutdown()
@staticmethod
def link(outputNode, inputNode, outputLink=0, inputLink=0):
assert outputNode._validate_link(outputLink, False), (outputNode, outputLink)
assert inputNode._validate_link(inputLink, True), (inputNode, inputLink)
outputNode.outputFunctions += \
[None,] * max(0,outputLink - len(outputNode.outputFunctions) + 1)
outputNode.outputEos += \
[None,] * max(0,outputLink - len(outputNode.outputEos) + 1)
inputNode.eosSeen += \
[None,] * max(0,inputLink - len(inputNode.eosSeen) + 1)
inputNode.upstreamInfo += \
[None,] * max(0,inputLink - len(inputNode.upstreamInfo) + 1)
outputNode.outputFunctions[outputLink] = \
lambda record: inputNode.input(record, inputLink=inputLink)
outputNode.outputEos[outputLink] = \
lambda: inputNode.eos(inputLink=inputLink)
inputNode.eosSeen[inputLink] = False
inputNode.upstreamInfo[inputLink] = (outputNode, outputLink)
# Utility dataflow classes
class StreamDFN(DataflowNode):
"""Easy class for binding together a single list of data flow nodes."""
def __init__(self):
DataflowNode.__init__(self)
self.start = None
self.end = None
def prepend(self, node):
if self.start:
DataflowNode.link(node, self.start)
self.start = node
else:
self.start = self.end = node
def append(self, node):
if self.end:
DataflowNode.link(self.end, node)
self.end = node
else:
self.start = self.end = node
def _validate_link(self, linknum, input_p):
return linknum == 0 # One input, one output
def input(self, record, inputLink=0):
assert inputLink == 0
if self.start:
self.start.input(record)
else:
self._output(record)
def _localEos(self):
if self.start:
self.start.eos()
class SplitDFN(DataflowNode):
"""Split the input into as many outputs as are linked."""
def __init__(self):
DataflowNode.__init__(self)
def _validate_link(self, linknum, input_p):
return linknum == 0 or not input_p # One input, any num outputs
def input(self, record, inputLink=0):
self._output(record)
def addOutput(self, downstreamNode, downstreamlink=0):
DataflowNode.link(self, downstreamNode, self._firstOpenOutput(),
downstreamlink)
class FilterDFN(DataflowNode):
"""Filters input through a specified function."""
def __init__(self, filterFunc=None, eosFunc=None):
DataflowNode.__init__(self)
self.filterFunc = filterFunc
self.eosFunc = eosFunc
def _validate_link(self, linknum, input_p):
return linknum == 0 # One input, 0-1 outputs.
def input(self, record, inputLink=0):
if self.filterFunc: self._output(self.filterFunc(record))
def _localEos(self):
if self.eosFunc: self.eosFunc()
class SinkDFN(FilterDFN):
"""Accepts input and dumps it to a specified function."""
# Implemented through FilterDFN with no outputs.
def _validate_link(self, linknum, input_p):
        return input_p and linknum == 0  # Any input, no outputs
class RecordIntervalDFN(DataflowNode):
"""Only transmit a specified interval of records from input to output."""
def __init__(self, interval):
"""Only transmit records whose record number falls in the given
interval from input to output. -1 for the end of the interval means
no limit."""
DataflowNode.__init__(self)
assert isinstance(interval[0], int) and isinstance(interval[1], int)
self.interval = interval
self.recordNum = 0
def _validate_link(self, linknum, input_p):
return linknum == 0 # One input, one output
def input(self, record, inputLink=0):
if (self.recordNum >= self.interval[0]
and (self.interval[1] == -1 or self.recordNum < self.interval[1])):
self._output(record)
self.recordNum += 1
if self.interval[1] != -1 and self.recordNum >= self.interval[1]:
self.eos()
self._shutdown()
class ByteIntervalDFN(DataflowNode):
"""Only transmit a specified byte interval (where input/output is in text strings)."""
def __init__(self, interval):
"""Only transmit bytes whose position in the stream falls in the given
interval from input to output. -1 for the end of the interval means
no limit."""
DataflowNode.__init__(self)
self.interval = interval
self.byteNum = 0
def _validate_link(self, linknum, input_p):
return linknum == 0 # One input, one output
def input(self, record, inputLink=0):
strlen = len(record)
# Map the byte interval into the string coords
# Limit by string boundaries
startInStr = self.interval[0] - self.byteNum
startInStr = min(strlen, max(0, startInStr))
endInStr = self.interval[1] - self.byteNum if self.interval[1] != -1 else strlen
endInStr = min(strlen, max(0, endInStr))
self.byteNum += len(record)
if endInStr - startInStr > 0:
self._output(record[startInStr:endInStr])
if self.interval[1] != -1 and self.byteNum > self.interval[1]:
self.eos()
self._shutdown()
class BatchRecordDFN(DataflowNode):
"""Pass on records input->output in batches. A batchsize of 0 means to
wait until end of stream."""
def __init__(self, batchsize):
DataflowNode.__init__(self)
self.batchsize = batchsize
self.recordlist = []
def _validate_link(self, linknum, input_p):
return linknum == 0 # One input, one output
def _push(self):
self._output(self.recordlist)
self.recordlist = []
def input(self, record, inputLink=0):
self.recordlist += (record,)
if self.batchsize and len(self.recordlist) >= self.batchsize:
self._push()
def _localEos(self):
if self.recordlist: self._push()
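### Hedged usage sketch (not part of the original module): a minimal pipeline
### built from the classes above. The record source is just a Python list;
### anything that calls input()/eos() on the head node would work equally well.
#
#   results = []
#   upper = FilterDFN(filterFunc=lambda rec: rec.upper())
#   sink = SinkDFN(filterFunc=lambda rec: results.append(rec))
#   DataflowNode.link(upper, sink)
#   for record in ["a", "b", "c"]:
#       upper.input(record)
#   upper.eos()            # propagates end-of-stream downstream
#   # results == ["A", "B", "C"]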
|
gpl-2.0
| -3,249,355,095,485,375,000
| 37.487889
| 90
| 0.636879
| false
| 3.959772
| false
| false
| false
|
czervenka/gapi
|
gapi/api/bigquery.py
|
1
|
3051
|
# Copyright 2013 Lukas Lukovsky <lukas.lukovsky@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from gapi.exceptions import GoogleApiHttpException
from ..client import ApiService, ApiResource
class Service(ApiService):
_base_url = 'https://www.googleapis.com/bigquery/v2'
_default_scope = 'https://www.googleapis.com/auth/bigquery'
@property
def _resources(self):
return [DataSets, Jobs, JobsQueries, Tables]
ApiService._services['bigquery'] = Service
class DataSets(ApiResource):
project_id = None
_name = 'datasets'
_methods = ['list']
@property
def _base_path(self):
return '/projects/%s/datasets' % self.project_id
class Jobs(ApiResource):
project_id = None
_name = 'jobs'
_methods = 'get', 'insert', 'list'
@property
def _base_path(self):
return '/projects/%s/jobs' % self.project_id
class JobsQueries(ApiResource):
project_id = None
_name = 'jobs_queries'
_methods = 'getQueryResults', 'query'
@property
def _base_path(self):
return '/projects/%s/queries' % self.project_id
def _api_getQueryResults(self, id, **kwargs):
return ApiResource._api_get(self, id, method='GET', params=kwargs)
def _api_query(self, query, **kwargs):
return self._service.fetch(self._get_item_url({}), method='POST', payload=query, params=kwargs)
class Tables(ApiResource):
project_id = None
_name = 'tables'
_methods = 'get', 'update', 'insert_all'
@property
def _base_path(self):
return '/projects/%s' % self.project_id
def _get_item_url(self, dataset_id, table_id):
return '%s/datasets/%s/tables/%s' % (self._base_url, dataset_id, table_id)
def _api_get(self, dataset_id, table_id, **kwargs):
return self._service.fetch(
self._get_item_url(dataset_id, table_id), method='GET', params=kwargs)
def _api_update(self, dataset_id, table_id, body, **kwargs):
return self._service.fetch(
self._get_item_url(dataset_id, table_id), method='PUT', payload=body, params=kwargs)
def _api_insert_all(self, dataset_id, table_id, rows, **kwargs):
body = dict()
body['kind'] = "bigquery#tableDataInsertAllRequest"
body['rows'] = rows
res = self._service.fetch(
self._get_item_url(dataset_id, table_id) + "/insertAll", method='POST', payload=body, params=kwargs)
if 'insertErrors' in res:
raise GoogleApiHttpException(res['insertErrors'])
return res
|
apache-2.0
| 6,696,008,212,205,719,000
| 29.51
| 112
| 0.656178
| false
| 3.523095
| false
| false
| false
|
felipenaselva/felipe.repository
|
script.module.resolveurl/lib/resolveurl/plugins/watchers.py
|
1
|
1793
|
"""
OVERALL CREDIT TO:
t0mm0, Eldorado, VOINAGE, BSTRDMKR, tknorris, smokdpi, TheHighway
resolveurl XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from lib import helpers
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
class WatchersResolver(ResolveUrl):
name = "watchers"
domains = ['watchers.to']
pattern = '(?://|\.)(watchers\.to)/(?:embed-)?([a-zA-Z0-9]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'User-Agent': common.RAND_UA}
html = self.net.http_GET(web_url, headers=headers).content
if html:
packed = helpers.get_packed_data(html)
headers.update({'Referer': web_url})
sources = helpers.parse_sources_list(packed)
if sources: return helpers.pick_source(sources) + helpers.append_headers(headers)
raise ResolverError('File not found')
def get_url(self, host, media_id):
return self._default_get_url(host, media_id)
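# Hedged usage sketch (not part of the original plugin): resolving a link with
# this class directly. The media id below is hypothetical.
#
#   resolver = WatchersResolver()
#   web_url = resolver.get_url('watchers.to', 'abc123xyz0')
#   stream_url = resolver.get_media_url('watchers.to', 'abc123xyz0')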
|
gpl-2.0
| 2,952,388,391,311,143,000
| 36.354167
| 93
| 0.660904
| false
| 3.923414
| false
| false
| false
|
westerncapelabs/django-grs-gatewaycms
|
quiz/migrations/0001_initial.py
|
1
|
8276
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Quiz'
db.create_table(u'quiz_quiz', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=30)),
('description', self.gf('django.db.models.fields.CharField')(max_length=163)),
('active', self.gf('django.db.models.fields.BooleanField')(default=False)),
('updated_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'quiz', ['Quiz'])
# Adding model 'Question'
db.create_table(u'quiz_question', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('quiz_id', self.gf('django.db.models.fields.related.ForeignKey')(related_name='q_quiz_id', to=orm['quiz.Quiz'])),
('question', self.gf('django.db.models.fields.CharField')(max_length=163)),
))
db.send_create_signal(u'quiz', ['Question'])
# Adding model 'Answer'
db.create_table(u'quiz_answer', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('question_id', self.gf('django.db.models.fields.related.ForeignKey')(related_name='question_id', to=orm['quiz.Question'])),
('answer', self.gf('django.db.models.fields.CharField')(max_length=156)),
('response', self.gf('django.db.models.fields.CharField')(max_length=156)),
('correct', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal(u'quiz', ['Answer'])
# Adding model 'FinalResponse'
db.create_table(u'quiz_finalresponse', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('quiz_id', self.gf('django.db.models.fields.related.ForeignKey')(related_name='fr_quiz_id', to=orm['quiz.Quiz'])),
('text', self.gf('django.db.models.fields.CharField')(max_length=180)),
('sms', self.gf('django.db.models.fields.CharField')(max_length=160)),
('for_total', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal(u'quiz', ['FinalResponse'])
def backwards(self, orm):
# Deleting model 'Quiz'
db.delete_table(u'quiz_quiz')
# Deleting model 'Question'
db.delete_table(u'quiz_question')
# Deleting model 'Answer'
db.delete_table(u'quiz_answer')
# Deleting model 'FinalResponse'
db.delete_table(u'quiz_finalresponse')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'quiz.answer': {
'Meta': {'object_name': 'Answer'},
'answer': ('django.db.models.fields.CharField', [], {'max_length': '156'}),
'correct': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_id'", 'to': u"orm['quiz.Question']"}),
'response': ('django.db.models.fields.CharField', [], {'max_length': '156'})
},
u'quiz.finalresponse': {
'Meta': {'object_name': 'FinalResponse'},
'for_total': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'quiz_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fr_quiz_id'", 'to': u"orm['quiz.Quiz']"}),
'sms': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '180'})
},
u'quiz.question': {
'Meta': {'object_name': 'Question'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '163'}),
'quiz_id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'q_quiz_id'", 'to': u"orm['quiz.Quiz']"})
},
u'quiz.quiz': {
'Meta': {'object_name': 'Quiz'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '163'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['quiz']
|
mit
| -552,876,124,836,205,300
| 60.311111
| 187
| 0.562953
| false
| 3.553456
| false
| false
| false
|
mtils/ems
|
ems/qt/graphics/selection_rect.py
|
1
|
3018
|
from ems.qt import QtWidgets, QtGui, QtCore
QGraphicsItem = QtWidgets.QGraphicsItem
QGraphicsObject = QtWidgets.QGraphicsObject
QObject = QtCore.QObject
pyqtProperty = QtCore.pyqtProperty
QStyle = QtWidgets.QStyle
QBrush = QtGui.QBrush
QColor = QtGui.QColor
QRectF = QtCore.QRectF
Qt = QtCore.Qt
QEvent = QtCore.QEvent
class SelectionRect(QGraphicsObject):
def __init__(self, parent=None):
super(SelectionRect, self).__init__(parent)
self._target = None
self._visible = None
self._margin = 10.0
self.setFlags(QGraphicsItem.ItemIsSelectable|
QGraphicsItem.ItemIsMovable)
def getTarget(self):
return self._target
def setTarget(self, target):
if self._target is target or target is self:
return
if self._target:
self._target.removeSceneEventFilter(self)
self._target = target
if self.scene() is not self._target.scene():
self._target.scene().addItem(self)
#self._target.positionChanged += self._moveWithTarget
self._target.installSceneEventFilter(self)
self.setPos(self._target.pos())
self.setZValue(self._target.zValue()-1)
target = pyqtProperty(QGraphicsItem, getTarget, setTarget)
def boundingRect(self):
if not self._target:
return QRectF()
targetRect = self._target.boundingRect()
myRect = QRectF(targetRect.topLeft(), targetRect.size())
myRect.setWidth(targetRect.width() + self._margin + self._margin)
myRect.setHeight(targetRect.height() + self._margin + self._margin)
#myRect.moveLeft(self._margin)
myRect.moveTo(targetRect.x() - self._margin, targetRect.y() - self._margin)
return myRect
def paint(self, painter, option, widget=None):
#super(TextItem, self).paint(painter, option, widget)
#if not (option.state & QStyle.State_Selected):
#return
rect = self.boundingRect()
innerRect = self._target.boundingRect()
#w = rect.width()
#h = rect.height()
#s = 4
brush = QBrush(QColor(128,179,255))
#painter.setPen(Qt.NoPen)
brush.setStyle(Qt.NoBrush)
painter.setBrush(brush)
#painter.setColor(QColor(128,179,255))
painter.drawRect(innerRect)
painter.drawRect(rect)
#painter.fillRect(0, 0, s, s, brush);
#painter.fillRect(0, 0 + h - s, s, s, brush);
#painter.fillRect(0 + w - s, 0, s, s, brush);
def mouseMoveEvent(self, event):
super(SelectionRect, self).mouseMoveEvent(event)
self._target.setPos(self.pos())
def sceneEventFilter(self, watched, event):
        # Event redirection is currently disabled by this early return; the
        # code below is kept for reference but never runs.
        return False
print("event", event.type())
# Redirect Mouse move to self
if event.type() != QEvent.GraphicsSceneMouseMove:
return False
self.mouseMoveEvent(event)
return True
def _moveWithTarget(self, position):
self.setPos(position)
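# Hedged usage sketch (not part of the original module): attaching the
# selection rectangle to an item that already belongs to a QGraphicsScene.
# `scene` is a hypothetical QGraphicsScene created by the caller.
#
#   item = scene.addRect(0, 0, 100, 50)   # any QGraphicsItem in the scene
#   selection = SelectionRect()
#   selection.target = item               # adds the rect to item's scene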
|
mit
| 349,490,669,558,714,940
| 31.804348
| 83
| 0.629225
| false
| 3.735149
| false
| false
| false
|
jarifibrahim/ashoka-dashboard
|
dashboard/migrations/0018_auto_20170124_2054.py
|
1
|
4825
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-24 15:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0017_auto_20170110_0012'),
]
operations = [
migrations.AlterModelOptions(
name='advisoryphase',
options={'verbose_name_plural': 'Advisory Phases (Optional)'},
),
migrations.AlterModelOptions(
name='consultantsurvey',
options={'verbose_name': 'Consultant Survey (Optional)', 'verbose_name_plural': 'Consultant Surveys (Optional)'},
),
migrations.AlterModelOptions(
name='fellowsurvey',
options={'verbose_name': 'Fellow Survey (Optional)', 'verbose_name_plural': 'Fellow Surveys (Optional)'},
),
migrations.AlterModelOptions(
name='member',
options={'verbose_name': 'Member', 'verbose_name_plural': 'Members'},
),
migrations.AlterModelOptions(
name='role',
options={'verbose_name_plural': 'Roles (Optional)'},
),
migrations.AlterModelOptions(
name='secondaryrole',
options={'verbose_name_plural': 'Secondary Roles (Optional)'},
),
migrations.AlterModelOptions(
name='team',
options={'verbose_name': 'Team', 'verbose_name_plural': 'Teams'},
),
migrations.AlterModelOptions(
name='teamstatus',
options={'verbose_name': 'Team status (Optional)', 'verbose_name_plural': 'Team status (Optional)'},
),
migrations.AlterModelOptions(
name='teamwarning',
options={'verbose_name': 'Team Warnings (Optional)', 'verbose_name_plural': 'Team Warnings (Optional)'},
),
migrations.AlterModelOptions(
name='weekwarning',
options={'verbose_name': 'Weekly Warnings (Optional)', 'verbose_name_plural': 'Weekly Warnings (Optional)'},
),
migrations.AddField(
model_name='teamwarning',
name='advisor_on',
field=models.CharField(choices=[('G', 'Green'), ('Y', 'Yellow'), ('R', 'Red')], default='G', max_length=3, verbose_name='Warning - Advisor Onboarding'),
),
migrations.AddField(
model_name='teamwarning',
name='advisor_on_comment',
field=models.CharField(blank=True, max_length=300, verbose_name='Comment - Advisory Onboarding'),
),
migrations.AddField(
model_name='teamwarning',
name='sys_vision',
field=models.CharField(choices=[('G', 'Green'), ('Y', 'Yellow'), ('R', 'Red')], default='G', max_length=3, verbose_name='Warning - Systemic Vision'),
),
migrations.AddField(
model_name='teamwarning',
name='sys_vision_comment',
field=models.CharField(blank=True, max_length=300, verbose_name='Comment - Systemic Vision'),
),
migrations.AddField(
model_name='weekwarning',
name='advisor_on_r',
field=models.BooleanField(default=False, help_text='Advisor Onboarding not happened in this week leads to Red warning.', verbose_name='Advisor Onboarding - Red warning'),
preserve_default=False,
),
migrations.AddField(
model_name='weekwarning',
name='advisor_on_y',
field=models.BooleanField(default=False, help_text='Advisor Onboarding not happened in this week leads to Yellow warning.', verbose_name='Advisor Onboarding - Yellow warning'),
preserve_default=False,
),
migrations.AddField(
model_name='weekwarning',
name='sys_vision_r',
field=models.BooleanField(default=False, help_text='Systemic Vision not happened in this week leads to Red warning', verbose_name='Systemic Vision - Red warning'),
preserve_default=False,
),
migrations.AddField(
model_name='weekwarning',
name='sys_vision_y',
field=models.BooleanField(default=False, help_text='Systemic Vision not happened in this week leads to Yellow warning', verbose_name='Systemic Vision - Yellow warning'),
preserve_default=False,
),
migrations.AlterField(
model_name='member',
name='secondary_role',
field=models.ManyToManyField(blank=True, related_name='secondary_role', to='dashboard.SecondaryRole'),
),
migrations.AlterField(
model_name='teamstatus',
name='advisor_onboarding_comment',
field=models.TextField(blank=True, verbose_name='Advisor Onboarding Comment'),
),
]
|
apache-2.0
| 7,202,574,898,264,482,000
| 43.266055
| 188
| 0.594197
| false
| 4.277482
| false
| false
| false
|
atalax/AsteroidOSLinux
|
asteroid/__init__.py
|
1
|
7309
|
import argparse
import collections
import datetime
import functools
import itertools
import random
import struct
import time
import xml
from asteroid import bleee
from gi.repository import GLib
def ensure_connected(fn):
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
# Note that this does not really strongly guarantee anything as the
# device can disconnect at any time
self.connect()
ret = fn(self, *args, **kwargs)
# Do we want to schedule a disconnect? Or is BLE low power enough?
return ret
return wrapper
class WeatherPredictions:
Prediction = collections.namedtuple("Prediction", ["id_", "min_", "max_"])
MAX_LEN = 5
def __init__(self, city_name):
self.city_name = city_name
self.values = []
def append_prediction(self, id_, min_, max_):
if len(self.values) >= WeatherPredictions.MAX_LEN:
raise ValueError("Maximum length exceeded")
self.values.append(WeatherPredictions.Prediction(
id_=id_, min_=min_, max_=max_))
@classmethod
def from_owm(class_, owmforecast):
        # We will get None if the name is not set
loc = owmforecast.get_location()
name = loc.get_name()
if not name:
name = "%.3f %.3f" % (loc.get_lat(), loc.get_lon())
ret = class_(name)
for x in range(WeatherPredictions.MAX_LEN):
w = owmforecast.get(x)
ret.append_prediction(w.get_weather_code(),
w.get_temperature()["min"],
w.get_temperature()["max"])
return ret
class Asteroid:
UUID_BATTERY = "00002a19-0000-1000-8000-00805f9b34fb"
UUID_TIME = "00005001-0000-0000-0000-00a57e401d05"
UUID_SCREENSHOT_REQ = "00006001-0000-0000-0000-00a57e401d05"
UUID_SCREENSHOT_RESP = "00006002-0000-0000-0000-00a57e401d05"
UUID_NOTIF_UPD = "00009001-0000-0000-0000-00a57e401d05"
UUID_WEATHER_CITY = "00008001-0000-0000-0000-00a57e401d05"
UUID_WEATHER_IDS = "00008002-0000-0000-0000-00a57e401d05"
UUID_WEATHER_MINT = "00008003-0000-0000-0000-00a57e401d05"
UUID_WEATHER_MAXT = "00008004-0000-0000-0000-00a57e401d05"
UUID_MEDIA_TITLE = "00007001-0000-0000-0000-00a57e401d05"
UUID_MEDIA_ALBUM = "00007002-0000-0000-0000-00a57e401d05"
UUID_MEDIA_ARTIST = "00007003-0000-0000-0000-00a57e401d05"
UUID_MEDIA_PLAY = "00007004-0000-0000-0000-00a57e401d05"
UUID_MEDIA_COMM = "00007005-0000-0000-0000-00a57e401d05"
MEDIA_COMMAND_PREVIOUS = 0x0
MEDIA_COMMAND_NEXT = 0x1
MEDIA_COMMAND_PLAY = 0x2
MEDIA_COMMAND_PAUSE = 0x3
def __init__(self, address):
self.ble = bleee.BLE()
self.address = address
self.dev = self.ble.device_by_address(self.address)
self.disconnect_timeout = None
self._disconnect_id = None
def connect(self):
# We also want to wait until services are resolved
while not self.dev.connected or not self.dev.services_resolved:
if not self.dev.connected:
try:
# Problematically, dbus calls block the entire event loop
# TODO: Fix this
self.dev.connect()
except GLib.GError:
# Just ignore everything for now
pass
else:
time.sleep(0.1)
@ensure_connected
def battery_level(self):
return self.dev.char_by_uuid(Asteroid.UUID_BATTERY).read()[0]
@ensure_connected
def update_time(self, to=None):
if to is None:
to = datetime.datetime.now()
data = [
to.year - 1900,
to.month - 1,
to.day,
to.hour,
to.minute,
to.second
]
self.dev.char_by_uuid(Asteroid.UUID_TIME).write(data)
@ensure_connected
def screenshot(self):
# TODO: This disconnects after a few callbacks, fix
crsp = self.dev.char_by_uuid(Asteroid.UUID_SCREENSHOT_RESP)
loop = GLib.MainLoop()
data_rem = None
def cb(*args):
print(args)
#loop.quit()
crsp.start_notify()
crsp.properties_changed.connect(cb)
self.dev.char_by_uuid(Asteroid.UUID_SCREENSHOT_REQ).write(b"\x00")
loop.run()
@ensure_connected
def notify(self, summary, body=None, id_=None, package_name=None,
app_name=None, app_icon=None):
if id_ is None:
id_ = random.randint(0, 2 ** 31)
id_ = str(id_)
xinsert = xml.etree.ElementTree.Element("insert")
for vl, xn in ((summary, "su"),
(body, "bo"),
(id_, "id"),
(package_name, "pn"),
(app_name, "an"),
(app_icon, "ai")):
if vl is not None:
xel = xml.etree.ElementTree.SubElement(xinsert, xn)
xel.text = vl
data = xml.etree.ElementTree.tostring(xinsert)
self.dev.char_by_uuid(Asteroid.UUID_NOTIF_UPD).write(data)
return id_
@ensure_connected
def update_weather(self, predictions):
# Set city name
self.dev.char_by_uuid(Asteroid.UUID_WEATHER_CITY).write(
predictions.city_name.encode())
self.dev.char_by_uuid(Asteroid.UUID_WEATHER_IDS).write(
struct.pack(">5H", *[round(p.id_) for p in predictions.values]))
self.dev.char_by_uuid(Asteroid.UUID_WEATHER_MINT).write(
struct.pack(">5H", *[round(p.min_) for p in predictions.values]))
self.dev.char_by_uuid(Asteroid.UUID_WEATHER_MAXT).write(
struct.pack(">5H", *[round(p.max_) for p in predictions.values]))
def update_media(self, title, album, artist, playing):
self.dev.char_by_uuid(Asteroid.UUID_MEDIA_TITLE).write(title.encode())
self.dev.char_by_uuid(Asteroid.UUID_MEDIA_ALBUM).write(album.encode())
self.dev.char_by_uuid(Asteroid.UUID_MEDIA_ARTIST).write(artist.encode())
self.dev.char_by_uuid(Asteroid.UUID_MEDIA_PLAY).write(
b"\x01" if playing else b"\x00")
def register_media_listener(self, fn):
# TODO: A way to unregister
ccomm = self.dev.char_by_uuid(Asteroid.UUID_MEDIA_COMM)
def cb(name, vals, lst):
if not "Value" in vals:
return
fn(vals["Value"][0])
ccomm.properties_changed.connect(cb)
ccomm.start_notify()
class DBusEavesdropper:
def __init__(self, bus, interface, member, callback):
self.bus = bus
self.interface = interface
self.member = member
self.callback = callback
self._dbus_ctl = self.bus.get("org.freedesktop.DBus")
# TODO: Escaping
# TODO: We probably want to unregister when destroyed?
self._match_id = self._dbus_ctl.AddMatch(
"interface=%s,member=%s,eavesdrop=true" %
(interface, member))
self.bus.con.add_filter(self._filter_func)
def _filter_func(self, con, msg, bl):
if msg.get_interface() == self.interface and \
msg.get_member() == self.member:
self.callback(msg)
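# Hedged usage sketch (not part of the original module): driving a watch from
# a script. The MAC address below is hypothetical and a working BlueZ stack
# plus a paired AsteroidOS watch are assumed.
#
#   watch = Asteroid("00:11:22:33:44:55")
#   print(watch.battery_level())
#   watch.update_time()                      # sync the watch clock to the host
#   watch.notify("Hello", body="Sent from the host", app_name="example")
#
#   predictions = WeatherPredictions("Berlin")
#   for _ in range(WeatherPredictions.MAX_LEN):
#       predictions.append_prediction(id_=800, min_=280, max_=290)
#   watch.update_weather(predictions)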
|
mit
| 6,464,442,055,945,354,000
| 35.183168
| 80
| 0.586674
| false
| 3.459063
| false
| false
| false
|
openstack/manila-ui
|
manila_ui/dashboards/project/share_snapshots/tabs.py
|
1
|
1082
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import tabs
class ShareSnapshotOverviewTab(tabs.Tab):
name = _("Share Snapshot Overview")
slug = "share_snapshot_overview_tab"
template_name = "project/share_snapshots/_detail.html"
def get_context_data(self, request):
return {"snapshot": self.tab_group.kwargs['snapshot']}
class ShareSnapshotDetailTabs(tabs.TabGroup):
slug = "share_snapshot_details"
tabs = (
ShareSnapshotOverviewTab,
)
|
apache-2.0
| -1,347,417,031,697,469,000
| 33.903226
| 78
| 0.716266
| false
| 3.977941
| false
| false
| false
|
HPENetworking/topology_lib_sflowtool
|
lib/topology_lib_sflowtool/parser.py
|
1
|
9867
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Parse sflowtool commands with output to a Python dictionary.
"""
from __future__ import unicode_literals, absolute_import
from __future__ import print_function, division
from re import search, findall, DOTALL, match
from logging import getLogger
from collections import OrderedDict
log = getLogger(__name__)
def parse_pid(response):
"""
Parse PID shell output using a regular expression.
:param str response: Output of a shell forking a subprocess.
"""
assert response
pid_regex = r'\[\d*\]\s+(?P<pid>\d+)'
regex_result = search(pid_regex, response)
if not regex_result:
log.debug('Failed to parse pid from:\n{}'.format(response))
raise Exception('PID regular expression didn\'t match.')
return int(regex_result.groupdict()['pid'])
def parse_sflowtool(raw_output, mode):
"""
Parse the sflowtool output command raw output.
:param str raw_output: bash raw result string.
:rtype: dict
:return: In the line mode, all sflow packets seen at the collector parsed
in the form:
::
{
'flow_count':10
'sample_count':5
'packets':[
{
'packet_type':'FLOW',
'agent_address':'10.10.11.1',
'in_port':8,
....(fields in FLOW packet)
},
{
'packet_type':'CNTR',
'agent_address':'10.10.11.1',
'if_index':2,
....(fields in CNTR packet)
}
]
}
In the detail mode, all sflow packets seen at the collector parsed
in the form:
::
{
'datagrams':
[
{
'datagramSourceIP':'10.10.12.1',
'datagramSize':'924',
'unixSecondsUTC':'1473185811',
....(fields in datagram packet)
'samples':
[
{
'sampleType_tag':'0:1'
'sampleType':'FLOWSAMPLE'
'headerLen':'64'
....(fields in sample)
},
{
'sampleType_tag':'0:2'
'sampleType':'COUNTERSSAMPLE'
'sampleSequenceNo':'1'
....(fields in sample)
},
....(all the samples captured in the datagram)
'cntr_samples': 1,
'flow_samples': 1,
]
},
{
'datagramSourceIP':'10.10.12.1',
'datagramSize':'924',
'unixSecondsUTC':'1473185811',
....(fields in datagram packet)
'samples':
[
{
'sampleType_tag':'0:1'
'sampleType':'FLOWSAMPLE'
'headerLen':'64'
....(fields in sample)
},
{
'sampleType_tag':'0:2'
'sampleType':'COUNTERSSAMPLE'
'sampleSequenceNo':'2'
....(fields in sample)
},
....(all the samples captured in the datagram)
'cntr_samples': 1,
'flow_samples': 1
]
},
....(all the datagrams captured)
]
'number_of_datagrams': 2
}
"""
if mode == 'line':
# Refer https://github.com/sflow/sflowtool regarding below fields
flow_packet_fields = ['packet_type', 'agent_address', 'in_port',
'out_port', 'src_mac', 'dst_mac',
'eth_type', 'in_vlan', 'out_vlan',
'src_ip', 'dst_ip', 'ip_protocol', 'ip_tos',
'ip_ttl', 'icmp_type', 'icmp_code', 'tcp_flags',
'packet_size', 'ip_size', 'sampling_rate']
sample_packet_fields = ['packet_type', 'agent_address', 'if_index',
'if_type', 'if_speed', 'if_direction',
'if_status', 'in_octets', 'in_ucastPkts',
'in_mcastPkts', 'in_bcastPkts', 'in_discards',
'in_errors', 'in_unknownProtos', 'out_octets',
'out_ucastPkts', 'out_mcastPkts',
'out_bcastPkts', 'out_discards', 'out_errors',
'if_promiscuousMode']
output = raw_output.splitlines()
flow_count = 0
sample_count = 0
result = {}
packets = []
for line in output:
packet = {} # sFlow packet information
sflow_packet = line.split(",")
if sflow_packet[0] == 'FLOW':
assert len(sflow_packet) == len(flow_packet_fields)
for field in range(len(sflow_packet)):
packet[flow_packet_fields[field]] = sflow_packet[field]
flow_count = flow_count + 1
packets.append(packet)
elif sflow_packet[0] == 'CNTR':
assert len(sflow_packet) == len(sample_packet_fields)
for field in range(len(sflow_packet)):
packet[sample_packet_fields[field]] = sflow_packet[field]
sample_count = sample_count + 1
packets.append(packet)
result['flow_count'] = flow_count
result['sample_count'] = sample_count
result['packets'] = packets
return result
elif mode == 'detail':
result = {}
result['datagrams'] = []
result['number_of_datagrams'] = 0
# Strings to be used while matching datagrams and samples
# in the output from sflowtool
start_datagram = 'startDatagram =================================\n'
end_datagram = 'endDatagram =================================\n'
start_sample = 'startSample ----------------------\n'
end_sample = 'endSample ----------------------\n'
# Regex string for identifying start/end of datagrams & samples
finder = r'{}(.*?){}'
# Regex to parse datagram attributes
datagram_info_re = (
r'datagramSourceIP\s(?P<datagramSourceIP>.+)\s'
r'datagramSize\s(?P<datagramSize>.+)\s'
r'unixSecondsUTC\s(?P<unixSecondsUTC>.+)\s'
r'datagramVersion\s(?P<datagramVersion>.+)\s'
r'agentSubId\s(?P<agentSubId>.+)\s'
r'agent\s(?P<agent>.+)\s'
r'packetSequenceNo\s(?P<packetSequenceNo>.+)\s'
r'sysUpTime\s(?P<sysUpTime>.+)\s'
r'samplesInPacket\s(?P<samplesInPacket>\d+)\s'
)
# Regex for matching attributes inside a sample
attribute_re = '((.+) (.+))'
# Make a list of datagrams from the sflowtool raw output
datagrams = findall(
finder.format(start_datagram, end_datagram), raw_output, DOTALL)
for datagram in datagrams:
# Get the datagram specific attributes and form a dict
re_result = match(datagram_info_re, datagram, DOTALL)
datagram_dict = re_result.groupdict()
# Initialize sample specific data inside the datagram_dict
datagram_dict['samples'] = []
datagram_dict['flow_samples'] = 0
datagram_dict['cntr_samples'] = 0
# Get list of samples from within the datagram
samples = findall(
finder.format(start_sample, end_sample), datagram, DOTALL)
for sample in samples:
sample_lines = sample.splitlines()
sample_dict = {}
# Match the attributes of each sample and populate
# into the sample_dict
for sample_line in sample_lines:
attribute = match(attribute_re, sample_line)
sample_dict[attribute.group(2)] = attribute.group(3)
# Add the sample to the list of samples under the datagram
datagram_dict['samples'].append(sample_dict)
# Increment respective counters based on type of sample
if sample_dict['sampleType'] == 'FLOWSAMPLE':
datagram_dict['flow_samples'] += 1
elif sample_dict['sampleType'] == 'COUNTERSSAMPLE':
datagram_dict['cntr_samples'] += 1
# Add the parsed datagram to result and increment count
# of datagrams
result['datagrams'].append(datagram_dict)
result['number_of_datagrams'] += 1
return result
__all__ = [
'parse_sflowtool'
]
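# Hedged usage sketch (not part of the original module): `raw_line_output` and
# `raw_detail_output` are assumed to hold text previously captured from
# sflowtool in line (-l) mode and in the default detailed mode respectively.
#
#   line_result = parse_sflowtool(raw_line_output, mode='line')
#   print(line_result['flow_count'], line_result['sample_count'])
#
#   detail_result = parse_sflowtool(raw_detail_output, mode='detail')
#   print(detail_result['number_of_datagrams'])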
|
apache-2.0
| -8,103,275,785,004,340,000
| 35.275735
| 78
| 0.480085
| false
| 4.509598
| false
| false
| false
|
bbengfort/cloudscope
|
tests/test_utils/test_statistics.py
|
1
|
23826
|
# tests.test_utils.test_statistics
# Testing for the statistics utility module.
#
# Author: Benjamin Bengfort <bengfort@cs.umd.edu>
# Created: Tue Aug 23 13:40:49 2016 -0400
#
# Copyright (C) 2016 University of Maryland
# For license information, see LICENSE.txt
#
# ID: test_statistics.py [] benjamin@bengfort.com $
"""
Testing for the statistics utility module.
"""
##########################################################################
## Imports
##########################################################################
import unittest
from itertools import product
from cloudscope.utils.statistics import *
##########################################################################
## Fixtures
##########################################################################
# Empty test case
EMPTY = []
# Float test cases
FLOATS = [
# Uniform random [0.0, 1.0)
[ 0.43730873, 0.66860239, 0.34969353, 0.64048078, 0.06388402,
0.27340017, 0.77561069, 0.0469865 , 0.00952501, 0.905221 ,
0.85934168, 0.81024019, 0.06976906, 0.54518943, 0.27677394,
0.12399665, 0.43676722, 0.5155873 , 0.38699299, 0.76616917,
0.02502538, 0.40382399, 0.99043387, 0.71853195, 0.42132248,
0.23079655, 0.12753139, 0.72196278, 0.63156918, 0.58127711,
0.323632 , 0.75723769, 0.55014024, 0.48530899, 0.81193682,
0.63641341, 0.9896141 , 0.59410421, 0.08999124, 0.44973318,
0.20534478, 0.35249505, 0.68384246, 0.10588445, 0.81486703,
0.82123886, 0.23312338, 0.29706749, 0.95132877, 0.53760118,
0.52506907, 0.18586977, 0.10429846, 0.37754277, 0.80177148,
0.8923954 , 0.01853723, 0.32609851, 0.83528495, 0.59851704,
0.94780306, 0.00333868, 0.64453206, 0.68733814, 0.69465826,
0.17311021, 0.81104648, 0.36074105, 0.86521824, 0.57384475,
0.54296227, 0.95244882, 0.4340912 , 0.79415668, 0.36713392,
0.01035679, 0.37757458, 0.86641362, 0.24478224, 0.48594984,
0.16053626, 0.4092285 , 0.52627802, 0.12932203, 0.49634128,
0.69001666, 0.62750143, 0.22644635, 0.61771225, 0.26848362,
0.38573517, 0.82619298, 0.4761255 , 0.60803911, 0.25304987,
0.30113422, 0.57631252, 0.66860624, 0.23604634, 0.21473307 ],
# Uniform random [0.0, 100.0]
[ 22.20520866, 17.17258577, 8.49659732, 13.95346708,
55.55125426, 8.80317998, 24.68324592, 96.84491714,
22.72401521, 73.64288806, 42.17152252, 14.37810073,
34.24014255, 60.81097632, 59.87367563, 52.2406963 ,
6.49507369, 39.25094041, 72.35007601, 94.3952359 ,
28.06879455, 39.47692788, 13.88718282, 6.97516371,
66.55848707, 31.92083665, 25.32500032, 56.42714507,
7.51769482, 28.60784098, 24.08155829, 89.91651969,
47.86004113, 71.85761032, 82.4294561 , 68.91388351,
4.44603844, 42.60146732, 64.99026944, 11.28079872,
89.95259469, 21.62473926, 40.67768745, 74.03776227,
47.28452248, 42.43533983, 4.54809125, 64.33809063,
0.48360149, 53.58079114, 71.05946081, 68.42234587,
70.6191961 , 89.55513029, 23.68983622, 46.13571428,
95.80340964, 31.05251035, 18.16043837, 85.30237868,
34.85336423, 85.13625608, 33.24675386, 65.98221573,
43.41008904, 1.41689122, 25.66436842, 97.83154993,
28.95244763, 58.6737343 , 31.11425024, 39.89891167,
18.87817841, 63.74118985, 7.34593289, 11.56234643,
70.74670506, 94.08037005, 20.42316914, 46.72559006,
41.20221363, 81.16258525, 83.10004094, 22.6069545 ,
46.25125172, 19.02403741, 41.4161593 , 17.98574115,
83.66625755, 66.30583531, 74.77229409, 81.07751229,
6.00914795, 18.30008296, 37.99743388, 23.08334708,
36.67738259, 28.58053073, 76.88689287, 88.09260102 ],
# Normal random gaussian distribution with mean=0 and sigma=1
[ 1.36628297, -0.95063669, -0.7409544 , 0.49168896, 0.64369943,
-1.36683641, -0.85050743, 0.14056131, 0.40071956, 0.06894656,
-0.35099341, -0.94658349, -0.05191993, -0.68728832, 1.5290626 ,
-0.29765041, -1.47736747, 1.42462537, -0.93737476, -1.75120617,
0.37956676, -0.41298492, 1.26101492, -1.11693991, 0.86228129,
1.25771588, -1.698729 , -0.34431668, -0.34907691, 1.52828139,
-1.65994198, -1.22920884, -1.416939 , 0.4581475 , 0.25962794,
-1.10938565, -2.01038612, -0.89623881, -0.02473882, -0.10925982,
1.49019119, -0.71829783, -0.57516934, 1.31854532, 0.64051439,
-0.539811 , -0.36544998, 0.34168854, 1.03403893, 0.1788948 ,
0.3961166 , -2.04685416, -0.50117633, 0.72760044, -1.23274552,
-0.34341958, -0.75571399, 1.39371562, -0.01919108, -0.17840926,
0.27388972, 0.81694269, 0.19208915, -0.90984528, -0.43602705,
-1.9333356 , -0.82054677, -0.22563851, 0.38139457, 0.35015976,
0.70850311, -0.24979133, -0.83115026, -0.22170927, -1.47006649,
-1.42263061, 2.67703557, -0.4531137 , -0.01348267, -0.1477644 ,
-0.59528241, -0.99513121, -0.19154543, 1.3695901 , -0.40227537,
1.37180334, 0.52361872, -0.09802685, 1.70726494, -0.28957362,
2.12909179, 0.91799377, 0.75537678, 0.35040934, -0.20546863,
1.70405968, 0.96502427, 0.81638739, 1.88802825, 1.06889865 ],
# Normal random gaussian distribution with mean=100 and sigma=18
[ 97.15759554, 77.17442118, 121.65339951, 115.47590292,
83.13049538, 80.58906683, 133.28962059, 101.19894129,
102.23057183, 92.82933217, 96.243821 , 97.2783628 ,
99.62594213, 119.51777017, 141.770821 , 111.48806454,
87.59254024, 92.02257259, 87.26595797, 106.22640402,
120.33377392, 80.79363771, 85.66117399, 62.35418484,
135.02835057, 128.33176531, 105.24356978, 125.16042398,
84.50687617, 99.95500342, 102.14580588, 129.78867181,
130.95831888, 107.58328424, 78.38888971, 85.59218946,
132.50719329, 82.13758304, 100.1639717 , 80.05534368,
91.68220069, 86.70158004, 88.42494344, 92.22226738,
93.75785656, 95.36178327, 85.2791005 , 88.03325987,
100.78703198, 136.73102739, 92.70148723, 115.0907645 ,
120.05135927, 100.12796585, 70.13846055, 136.07669925,
97.44144139, 109.51705036, 99.31486862, 111.37134817,
78.430312 , 114.61626988, 103.06188281, 55.51858758,
104.55853914, 126.27837134, 91.4791138 , 91.74949264,
99.45526277, 60.83926795, 73.31706548, 109.1869211 ,
92.71997445, 92.29068272, 69.76686038, 96.69493926,
98.98031343, 105.2876436 , 124.74573867, 123.59994673,
114.99458381, 93.90003085, 89.68415181, 135.73404241,
112.74098956, 118.8758599 , 77.30905375, 83.08948144,
123.81454249, 95.22466886, 79.00660774, 87.19579895,
105.46176326, 110.65034971, 95.13515247, 62.24700869,
88.66132196, 90.00716862, 107.83890058, 97.73738434],
]
# Integer test cases
INTEGERS = [
# Between 1 and 10
[7, 6, 1, 1, 7, 3, 9, 5, 7, 6, 3, 1, 5, 3, 9, 5, 1, 8, 1, 2, 2, 2, 5,
2, 1, 6, 8, 5, 8, 4, 7, 8, 3, 3, 7, 4, 4, 4, 7, 5, 5, 9, 2, 6, 6, 6,
1, 4, 7, 1, 6, 6, 6, 5, 5, 6, 9, 5, 7, 6, 5, 6, 3, 8, 9, 9, 6, 6, 2,
1, 3, 1, 6, 2, 7, 4, 3, 6, 7, 3, 4, 2, 9, 2, 4, 4, 9, 5, 5, 5, 4, 7,
5, 4, 2, 4, 7, 3, 8, 1, 8, 1, 9, 7, 6, 4, 4, 1, 3, 6, 9, 1, 1, 5, 6],
# Between 10 and 500
[128, 142, 351, 128, 436, 451, 28, 416, 204, 194, 429, 33, 55,
122, 305, 466, 293, 386, 203, 201, 194, 288, 184, 15, 486, 39,
419, 311, 190, 101, 164, 79, 16, 206, 176, 74, 189, 12, 77,
182, 296, 280, 169, 282, 415, 108, 407, 11, 268, 135, 356, 326,
312, 294, 225, 406, 172, 331, 196, 266, 80, 406, 388, 205, 401,
421, 224, 106, 45, 247, 200, 201, 451, 205, 179, 279, 172, 30,
216, 236, 56, 323, 206, 14, 383, 211, 106, 24, 60, 210, 36,
83, 348, 276, 397, 415, 32, 58, 15, 224, 379, 248, 166, 450,
161, 74, 306, 412, 471, 108, 169, 157, 75, 59, 14, 295, 390],
# Bits (1s and 0s)
[1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1,
0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1,
0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0,
1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0],
# With negative numbers (between -10 and 10)
[-10, 8, 0, 0, -5, 6, -1, 2, -10, -4, -5, 1, 8,
-7, 2, -2, -5, 5, 5, -5, -6, -1, -1, 5, 9, 1,
1, -10, 9, 5, -7, 7, 8, -1, -5, -5, 3, -7, 8,
-2, 6, -6, 0, -9, -7, -10, -2, 5, -8, -9, 7, -9,
-6, 4, -2, -8, -7, 2, -3, 8, -1, 9, 7, 4, -3,
8, 3, -5, 4, -5, -10, -8, 3, -9, -9, -6, -3, 2,
-6, -10, -6, 8, -4, -10, 4, -8, -6, -3, -8, 6, 2,
-5, 6, 2, 0, -4, -2, -8, -7, 5, 6, 9, 9, 4,
5, 8, 3, -9, -6, 7, -1, 6, -5, 1, -2, -7, 0],
]
##########################################################################
## Statistics Tests
##########################################################################
class StatisticsTests(unittest.TestCase):
"""
Test cases for the statistics helper functions
"""
def test_mean_edge_cases(self):
"""
Test any edge cases that are passed to mean.
"""
self.assertEqual(mean(EMPTY), None)
self.assertEqual(mean(None), None)
def test_mean_integers(self):
"""
Test mean on integers returns correct float
"""
# Computed using numpy
expected = [
4.8260869565217392, 219.92307692307693,
0.47826086956521741, -0.89743589743589747
]
for idx, data in enumerate(INTEGERS):
mu = mean(data)
self.assertIsInstance(mu, float)
self.assertEqual(mu, expected[idx])
def test_mean_floats(self):
"""
Test mean on floats returns correct float
"""
# Computed using numpy
expected = [
0.48705447450000006, 45.260727738500009,
-0.014150190199999982, 99.931501583200003
]
for idx, data in enumerate(FLOATS):
mu = mean(data)
self.assertIsInstance(mu, float)
self.assertAlmostEqual(mu, expected[idx])
def test_median_edge_cases(self):
"""
Test any edge cases that are passed to median.
"""
self.assertEqual(median(EMPTY), None)
self.assertEqual(median(None), None)
def test_median_integers(self):
"""
Test median on integers returns correct float
"""
# Computed using numpy
expected = [5, 204, 0, -1]
for idx, data in enumerate(INTEGERS):
mu = median(data)
self.assertIsInstance(mu, int)
self.assertEqual(mu, expected[idx])
def test_median_floats(self):
"""
Test median on floats returns correct float
"""
# Computed using numpy
expected = [
0.49114555999999998, 41.309186464999996,
-0.12851210999999998, 97.589412865
]
for idx, data in enumerate(FLOATS):
mu = median(data)
self.assertIsInstance(mu, float)
self.assertAlmostEqual(mu, expected[idx])
def test_median_even_integers(self):
"""
Test median on an even lengthed list of integers
"""
cases = [
[5, 6, 9, 6, 7, 2, 5, 5, 5, 3],
[6, 1, 6, 7, 2, 1, 4, 9, 2, 8, 3, 8, 7, 5],
[6, 5, 6, 1, 5, 1, 6, 8, 2, 6, 8, 5, 5, 2, 1, 8, 1, 7]
]
# Computed using numpy
expected = [5.0, 5.5, 5.0]
for case, expect in zip(cases, expected):
mu = median(case)
self.assertIsInstance(mu, float)
self.assertEqual(expect, mu)
##########################################################################
## Online Variance Tests
##########################################################################
class OnlineVarianceTests(unittest.TestCase):
"""
Test cases for the OnlineVariance class
"""
def test_mean_edge_cases(self):
"""
Test any edge cases that are passed to mean.
"""
online = OnlineVariance()
# Test the case of no samples
self.assertEqual(online.mean, 0.0)
self.assertEqual(online.std, 0.0)
self.assertEqual(online.var, 0.0)
# Test the case of one sample
online.update(42)
self.assertEqual(online.mean, 42.0)
self.assertEqual(online.std, 0.0)
self.assertEqual(online.var, 0.0)
def test_online_variance_length(self):
"""
Test that the length of an online variance is the number of samples.
"""
cases = INTEGERS + FLOATS
expected = [115,117,115,117,100,100,100,100]
for data, case in zip(cases, expected):
online = OnlineVariance()
self.assertEqual(len(online), 0)
for item in data:
online.update(item)
self.assertEqual(len(online), case)
def test_online_integers_mean(self):
"""
Test online variance computing means on integers
"""
# Computed using numpy
expected = [
4.8260869565217392, 219.92307692307693,
0.47826086956521741, -0.89743589743589747
]
for data, case in zip(INTEGERS, expected):
online = OnlineVariance()
self.assertEqual(online.mean, 0.0)
for item in data:
online.update(item)
self.assertIsInstance(online.mean, float)
self.assertEqual(online.mean, case)
def test_online_float_mean(self):
"""
Test online variance computing means on floats.
"""
# Computed using numpy
expected = [
0.48705447450000006, 45.260727738500009,
-0.014150190199999982, 99.931501583200003
]
for data, case in zip(FLOATS, expected):
online = OnlineVariance()
self.assertEqual(online.mean, 0.0)
for item in data:
online.update(item)
self.assertIsInstance(online.mean, float)
self.assertAlmostEqual(online.mean, case)
@unittest.skip("currently not accurate to enough places")
def test_online_integers_variance(self):
"""
Test online variance computing variance on integers
"""
# Computed using numpy
expected = [
5.9001890359168243, 18264.618014464173,
0.24952741020793956, 35.203155818540431
]
for data, case in zip(INTEGERS, expected):
online = OnlineVariance()
self.assertEqual(online.variance, 0.0)
for item in data:
online.update(item)
self.assertIsInstance(online.variance, float)
self.assertAlmostEqual(online.variance, case, places=3)
@unittest.skip("currently not accurate to enough places")
def test_online_float_variance(self):
"""
Test online variance computing variance on floats.
"""
# Computed using numpy
expected = [
0.073895851263651294, 766.42173756693592,
1.0187313521468584, 348.99176719359377
]
for data, case in zip(FLOATS, expected):
online = OnlineVariance()
self.assertEqual(online.variance, 0.0)
for item in data:
online.update(item)
self.assertIsInstance(online.variance, float)
self.assertAlmostEqual(online.variance, case, places=3)
@unittest.skip("currently not accurate to enough places")
def test_online_integers_standard_deviation(self):
"""
Test online variance computing standard deviation on integers
"""
# Computed using numpy
expected = [
2.4290304724142149, 135.1466537301763,
0.49952718665548079, 5.9332247402690239
]
for data, case in zip(INTEGERS, expected):
online = OnlineVariance()
self.assertEqual(online.standard_deviation, 0.0)
for item in data:
online.update(item)
self.assertIsInstance(online.standard_deviation, float)
self.assertAlmostEqual(online.standard_deviation, case, places=3)
@unittest.skip("currently not accurate to enough places")
def test_online_float_standard_deviation(self):
"""
Test online variance computing standard deviation on floats.
"""
# Computed using numpy
expected = [
0.27183791358758491, 27.684322956629007,
1.0093222241419528, 18.681321344958278
]
for data, case in zip(FLOATS, expected):
online = OnlineVariance()
self.assertEqual(online.standard_deviation, 0.0)
for item in data:
online.update(item)
self.assertIsInstance(online.standard_deviation, float)
self.assertAlmostEqual(online.standard_deviation, case, places=3)
def test_online_variance_initialization(self):
"""
Be able to initialize online variance with an iterable.
"""
# Computed using numpy
expected = [
(115, 4.8260869565217392),
(117, 219.92307692307693),
(115, 0.47826086956521741),
(117, -0.89743589743589747),
(100, 0.48705447450000006),
(100, 45.260727738500009),
(100, -0.014150190199999982),
(100, 99.931501583200003),
]
for data, (length, mean) in zip(INTEGERS+FLOATS, expected):
online = OnlineVariance(data)
self.assertEqual(len(online),length)
self.assertAlmostEqual(online.mean, mean)
def test_online_variance_addition(self):
"""
Be able to add two online variance objects together for a new mean.
"""
# Computed using numpy
expected = [
# (mean, stddev, variance)
(4.8260869565217392, 2.4290304724142149, 5.9001890359168243),
(113.30172413793103, 144.15193253022923, 20779.779652199759),
(2.652173913043478, 2.7929833769049353, 7.8007561436672956),
(1.9396551724137931, 5.3728063576641221, 28.867048156956006),
(2.8079323137209302, 2.8060961522546988, 7.8741756156986247),
(23.632896622558139, 27.683598847110545, 766.38164512774028),
(2.5748138650232555, 3.0754200874004387, 9.4582087139861208),
(49.061163527069773, 49.150086127405451, 2415.730966331374),
(113.30172413793103, 144.15193253022923, 20779.779652199762),
(219.92307692307693, 135.14665373017627, 18264.618014464169),
(111.14655172413794, 145.77129904765351, 21249.271626040427),
(109.51282051282051, 146.08331631545036, 21340.335305719924),
(118.80048593294931, 147.68865272870397, 21811.93814481972),
(139.43351508686635, 133.34488923389478, 17780.859484799668),
(118.56951604138249, 147.87525029514103, 21867.089649850604),
(164.62742008442396, 116.55887937387467, 13585.972360893467),
(2.652173913043478, 2.7929833769049353, 7.8007561436672956),
(111.14655172413794, 145.77129904765351, 21249.271626040427),
(0.47826086956521741, 0.49952718665548079, 0.24952741020793953),
(-0.21551724137931033, 4.2837021421670043, 18.35010404280618),
(0.48235091837209304, 0.40970422355484531, 0.16785755079867867),
(21.307315227209301, 29.249540614746596, 855.53562617371074),
(0.24923246967441859, 0.8170794298465649, 0.66761879467838758),
(46.735582131720932, 51.216754643725118, 2623.1559562355387),
(1.9396551724137931, 5.3728063576641221, 28.867048156956006),
(109.51282051282051, 146.08331631545036, 21340.335305719924),
(-0.21551724137931033, 4.2837021421670052, 18.350104042806187),
(-0.89743589743589747, 5.9332247402690239, 35.203155818540431),
(-0.25942190115207375, 4.4148407820487128, 19.490819130840489),
(20.37360725276498, 30.025743253384654, 901.54525791817412),
(-0.49039179271889405, 4.4321344921977124, 19.643816156928672),
(45.567512250322572, 52.017556151674398, 2705.8261479925991),
(2.8079323137209302, 2.8060961522546988, 7.8741756156986256),
(118.80048593294931, 147.68865272870397, 21811.93814481972),
(0.48235091837209304, 0.40970422355484531, 0.16785755079867867),
(-0.25942190115207375, 4.4148407820487128, 19.490819130840489),
(0.48705447450000006, 0.27183791358758491, 0.073895851263651294),
(22.8738911065, 29.739170652473767, 884.41827109695691),
(0.23645214215000002, 0.78045828247544069, 0.60911513068451473),
(50.209278028850001, 51.447374536619328, 2646.8323467111868),
(23.632896622558139, 27.683598847110545, 766.38164512774028),
(139.43351508686635, 133.34488923389478, 17780.859484799668),
(21.307315227209301, 29.2495406147466, 855.53562617371097),
(20.373607252764977, 30.025743253384654, 901.54525791817412),
(22.873891106500004, 29.739170652473767, 884.41827109695691),
(45.260727738500009, 27.684322956629007, 766.4217375669358),
(22.623288774150005, 29.936163370148371, 896.17387732421298),
(72.59611466085002, 36.123816666776065, 1304.9301305748484),
(2.5748138650232555, 3.0754200874004387, 9.4582087139861208),
(118.56951604138249, 147.875250295141, 21867.089649850601),
(0.24923246967441862, 0.8170794298465649, 0.66761879467838758),
(-0.49039179271889405, 4.4321344921977124, 19.643816156928676),
(0.23645214215000002, 0.78045828247544069, 0.60911513068451473),
(22.623288774149998, 29.936163370148371, 896.17387732421298),
(-0.014150190199999982, 1.0093222241419528, 1.0187313521468584),
(49.958675696500002, 51.6941831967128, 2672.2885763753043),
(49.061163527069773, 49.150086127405451, 2415.730966331374),
(164.62742008442396, 116.55887937387467, 13585.972360893466),
(46.735582131720932, 51.216754643725118, 2623.1559562355387),
(45.567512250322586, 52.017556151674405, 2705.8261479925995),
(50.209278028849994, 51.447374536619328, 2646.8323467111868),
(72.596114660849992, 36.123816666776065, 1304.9301305748484),
(49.958675696499995, 51.6941831967128, 2672.2885763753043),
(99.931501583200003, 18.681321344958278, 348.99176719359377)
]
for (a, b), (mean, stddev, variance) in zip(product(INTEGERS + FLOATS, repeat=2), expected):
oa = OnlineVariance(a)
ob = OnlineVariance(b)
online = oa + ob
self.assertIsNot(oa, ob)
self.assertIsNot(oa, online)
self.assertIsNot(ob, online)
self.assertAlmostEqual(mean, online.mean)
# Not precise enough for these calculations
# self.assertAlmostEqual(stddev, online.stddev)
# self.assertAlmostEqual(variance, online.variance)
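# For context, a hedged sketch of the pairwise combination rule that an
# OnlineVariance.__add__ of this kind typically implements (Chan et al.'s
# parallel algorithm); the names n_a, mean_a, M2_a are illustrative and not
# taken from the implementation under test:
#   n    = n_a + n_b
#   d    = mean_b - mean_a
#   mean = mean_a + d * n_b / n
#   M2   = M2_a + M2_b + d ** 2 * n_a * n_b / n   # variance = M2 / n, stddev = sqrt(variance)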
license: mit | hash: -3,628,218,771,555,699,700 | line_mean: 42.163043 | line_max: 100 | alpha_frac: 0.580123 | autogenerated: false | ratio: 2.720484 | config_test: true | has_no_keywords: false | few_assignments: false

repo_name: Alberto-Beralix/Beralix | path: i386-squashfs-root/usr/share/hplip/base/g.py | copies: 1 | size: 11695
# -*- coding: utf-8 -*-
#
# (c) Copyright 2003-2009 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Don Welch
#
# NOTE: This module is safe for 'from g import *'
#
# Std Lib
import sys
import os
import os.path
import ConfigParser
import locale
import pwd
import stat
import re
# Local
from codes import *
import logger
# System wide logger
log = logger.Logger('', logger.Logger.LOG_LEVEL_INFO, logger.Logger.LOG_TO_CONSOLE)
log.set_level('info')
MINIMUM_PYQT_MAJOR_VER = 3
MINIMUM_PYQT_MINOR_VER = 14
MINIMUM_QT_MAJOR_VER = 3
MINIMUM_QT_MINOR_VER = 0
def to_bool(s, default=False):
if isinstance(s, str) and s:
if s[0].lower() in ['1', 't', 'y']:
return True
elif s[0].lower() in ['0', 'f', 'n']:
return False
elif isinstance(s, bool):
return s
return default
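# Illustrative behaviour of to_bool() (examples derived from the code above):
#   to_bool('yes')       -> True    # first character 'y'
#   to_bool('0')         -> False   # first character '0'
#   to_bool(None, True)  -> True    # neither str nor bool, so the default is returned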
# System wide properties
class Properties(dict):
def __getattr__(self, attr):
if attr in self.keys():
return self.__getitem__(attr)
else:
return ""
def __setattr__(self, attr, val):
self.__setitem__(attr, val)
prop = Properties()
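# A short usage sketch of the Properties wrapper (hypothetical keys, for illustration):
#   prop.version = '1.0'   # stored via __setattr__ as prop['version']
#   prop.version           # -> '1.0'
#   prop.not_set_yet       # -> "" (missing attributes fall back to an empty string)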
class ConfigBase(object):
def __init__(self, filename):
self.filename = filename
self.conf = ConfigParser.ConfigParser()
self.read()
def get(self, section, key, default=u''):
try:
return self.conf.get(section, key)
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
return default
def set(self, section, key, value):
if not self.conf.has_section(section):
self.conf.add_section(section)
self.conf.set(section, key, value)
self.write()
def sections(self):
return self.conf.sections()
def has_section(self, section):
return self.conf.has_section(section)
def options(self, section):
return self.conf.options(section)
keys = options
def read(self):
if self.filename is not None:
filename = self.filename
if filename.startswith("/root/"):
# Don't try opening a file in root's home directory.
log.error("attempted to read from '%s'" % self.filename)
return
try:
fp = open(self.filename, "r")
self.conf.readfp(fp)
fp.close()
except (OSError, IOError):
log.debug("Unable to open file %s for reading." % self.filename)
def write(self):
if self.filename is not None:
filename = self.filename
if filename.startswith("/root/") or filename.startswith("/etc/"):
# Don't try writing a file in root's home directory or
# the system-wide config file.
# See bug #479178.
log.error("attempted to write to '%s'" % self.filename)
return
try:
fp = open(self.filename, "w")
self.conf.write(fp)
fp.close()
except (OSError, IOError):
log.debug("Unable to open file %s for writing." % self.filename)
class SysConfig(ConfigBase):
def __init__(self):
ConfigBase.__init__(self, '/etc/hp/hplip.conf')
class State(ConfigBase):
def __init__(self):
ConfigBase.__init__(self, '/var/lib/hp/hplip.state')
class UserConfig(ConfigBase):
def __init__(self):
if not os.geteuid() == 0:
prop.user_dir = os.path.expanduser('~/.hplip')
try:
if not os.path.exists(prop.user_dir):
os.makedirs(prop.user_dir)
except OSError:
pass # This is sometimes OK, if running hpfax: for example
prop.user_config_file = os.path.join(prop.user_dir, 'hplip.conf')
if not os.path.exists(prop.user_config_file):
try:
file(prop.user_config_file, 'w').close()
s = os.stat(os.path.dirname(prop.user_config_file))
os.chown(prop.user_config_file, s[stat.ST_UID], s[stat.ST_GID])
except IOError:
pass
ConfigBase.__init__(self, prop.user_config_file)
else:
# If running as root, conf file is None
prop.user_dir = None
prop.user_config_file = None
ConfigBase.__init__(self, None)
def workingDirectory(self):
t = self.get('last_used', 'working_dir', os.path.expanduser("~"))
try:
t = t.decode('utf-8')
except UnicodeError:
log.error("Invalid unicode: %s" % t)
log.debug("working directory: %s" % t)
return t
def setWorkingDirectory(self, t):
self.set('last_used', 'working_dir', t.encode('utf-8'))
log.debug("working directory: %s" % t.encode('utf-8'))
os.umask(0037)
# System Config File: Directories and build settings. Not altered after installation.
sys_conf = SysConfig()
# System State File: System-wide runtime settings
sys_state = State()
# Per-user Settings File: (Note: For Qt4 code, limit the use of this to non-GUI apps. only)
user_conf = UserConfig()
# Language settings
try:
prop.locale, prop.encoding = locale.getdefaultlocale()
except ValueError:
prop.locale = 'en_US'
prop.encoding = 'UTF8'
prop.version = sys_conf.get('hplip', 'version', '0.0.0') # e.g., 3.9.2b.10
_p, _x = re.compile(r'(\d\w*)', re.I), []
for _y in prop.version.split('.')[:3]:
_z = _p.match(_y)
if _z is not None:
_x.append(_z.group(1))
prop.installed_version = '.'.join(_x) # e.g., '3.9.2'
try:
prop.installed_version_int = int(''.join(['%02x' % int(_y) for _y in _x]), 16) # e.g., 0x030902 -> 198914
except ValueError:
prop.installed_version_int = 0
prop.home_dir = sys_conf.get('dirs', 'home', os.path.realpath(os.path.normpath(os.getcwd())))
prop.username = pwd.getpwuid(os.getuid())[0]
pdb = pwd.getpwnam(prop.username)
prop.userhome = pdb[5]
prop.history_size = 50
prop.data_dir = os.path.join(prop.home_dir, 'data')
prop.image_dir = os.path.join(prop.home_dir, 'data', 'images')
prop.xml_dir = os.path.join(prop.home_dir, 'data', 'xml')
prop.models_dir = os.path.join(prop.home_dir, 'data', 'models')
prop.localization_dir = os.path.join(prop.home_dir, 'data', 'localization')
prop.max_message_len = 8192
prop.max_message_read = 65536
prop.read_timeout = 90
prop.ppd_search_path = '/usr/share;/usr/local/share;/usr/lib;/usr/local/lib;/usr/libexec;/opt;/usr/lib64'
prop.ppd_search_pattern = 'HP-*.ppd.*'
prop.ppd_download_url = 'http://www.linuxprinting.org/ppd-o-matic.cgi'
prop.ppd_file_suffix = '-hpijs.ppd'
# Build and install configurations
prop.gui_build = to_bool(sys_conf.get('configure', 'gui-build', '0'))
prop.net_build = to_bool(sys_conf.get('configure', 'network-build', '0'))
prop.par_build = to_bool(sys_conf.get('configure', 'pp-build', '0'))
prop.usb_build = True
prop.scan_build = to_bool(sys_conf.get('configure', 'scanner-build', '0'))
prop.fax_build = to_bool(sys_conf.get('configure', 'fax-build', '0'))
prop.doc_build = to_bool(sys_conf.get('configure', 'doc-build', '0'))
prop.foomatic_xml_install = to_bool(sys_conf.get('configure', 'foomatic-xml-install', '0'))
prop.foomatic_ppd_install = to_bool(sys_conf.get('configure', 'foomatic-ppd-install', '0'))
prop.hpcups_build = to_bool(sys_conf.get('configure', 'hpcups-install', '0'))
prop.hpijs_build = to_bool(sys_conf.get('configure', 'hpijs-install', '0'))
# Spinner, ala Gentoo Portage
spinner = "\|/-\|/-"
spinpos = 0
def update_spinner():
global spinner, spinpos
if not log.is_debug() and sys.stdout.isatty():
sys.stdout.write("\b" + spinner[spinpos])
spinpos=(spinpos + 1) % 8
sys.stdout.flush()
def cleanup_spinner():
if not log.is_debug() and sys.stdout.isatty():
sys.stdout.write("\b \b")
sys.stdout.flush()
# Internal/messaging errors
ERROR_STRINGS = {
ERROR_SUCCESS : 'No error',
ERROR_UNKNOWN_ERROR : 'Unknown error',
ERROR_DEVICE_NOT_FOUND : 'Device not found',
ERROR_INVALID_DEVICE_ID : 'Unknown/invalid device-id field',
ERROR_INVALID_DEVICE_URI : 'Unknown/invalid device-uri field',
ERROR_DATA_LENGTH_EXCEEDS_MAX : 'Data length exceeds maximum',
ERROR_DEVICE_IO_ERROR : 'Device I/O error',
ERROR_NO_PROBED_DEVICES_FOUND : 'No probed devices found',
ERROR_DEVICE_BUSY : 'Device busy',
ERROR_DEVICE_STATUS_NOT_AVAILABLE : 'DeviceStatus not available',
ERROR_INVALID_SERVICE_NAME : 'Invalid service name',
ERROR_ERROR_INVALID_CHANNEL_ID : 'Invalid channel-id (service name)',
ERROR_CHANNEL_BUSY : 'Channel busy',
ERROR_DEVICE_DOES_NOT_SUPPORT_OPERATION : 'Device does not support operation',
ERROR_DEVICEOPEN_FAILED : 'Device open failed',
ERROR_INVALID_DEVNODE : 'Invalid device node',
ERROR_INVALID_HOSTNAME : "Invalid hostname ip address",
ERROR_INVALID_PORT_NUMBER : "Invalid JetDirect port number",
ERROR_NO_CUPS_QUEUE_FOUND_FOR_DEVICE : "No CUPS queue found for device.",
ERROR_DATFILE_ERROR: "DAT file error",
ERROR_INVALID_TIMEOUT: "Invalid timeout",
ERROR_IO_TIMEOUT: "I/O timeout",
ERROR_FAX_INCOMPATIBLE_OPTIONS: "Incompatible fax options",
ERROR_FAX_INVALID_FAX_FILE: "Invalid fax file",
ERROR_FAX_FILE_NOT_FOUND: "Fax file not found",
ERROR_INTERNAL : 'Unknown internal error',
}
class Error(Exception):
def __init__(self, opt=ERROR_INTERNAL):
self.opt = opt
self.msg = ERROR_STRINGS.get(opt, ERROR_STRINGS[ERROR_INTERNAL])
log.debug("Exception: %d (%s)" % (opt, self.msg))
Exception.__init__(self, self.msg, opt)
# Make sure True and False are avail. in pre-2.2 versions
try:
True
except NameError:
True = (1==1)
False = not True
# as new translations are completed, add them here
supported_locales = { 'en_US': ('us', 'en', 'en_us', 'american', 'america', 'usa', 'english'),}
# Localization support was disabled in 3.9.2
#'zh_CN': ('zh', 'cn', 'zh_cn' , 'china', 'chinese', 'prc'),
#'de_DE': ('de', 'de_de', 'german', 'deutsche'),
#'fr_FR': ('fr', 'fr_fr', 'france', 'french', 'français'),
#'it_IT': ('it', 'it_it', 'italy', 'italian', 'italiano'),
#'ru_RU': ('ru', 'ru_ru', 'russian'),
#'pt_BR': ('pt', 'br', 'pt_br', 'brazil', 'brazilian', 'portuguese', 'brasil', 'portuguesa'),
#'es_MX': ('es', 'mx', 'es_mx', 'mexico', 'spain', 'spanish', 'espanol', 'español'),
#}
license: gpl-3.0 | hash: 3,392,872,497,385,884,000 | line_mean: 32.991279 | line_max: 116 | alpha_frac: 0.593175 | autogenerated: false | ratio: 3.458444 | config_test: true | has_no_keywords: false | few_assignments: false

repo_name: ctlewitt/Invisible-Keyboard | path: analyze_ngram_stats.py | copies: 1 | size: 3628
import re
import string
actual_letter_frequency = {"a":"11.602%", "b":"4.702%", "c":"3.511%", "d":"2.670%", "e":"2.007%", "f":"3.779%", "g":"1.950%", "h":"7.232%", "i":"6.286%", "j":".597%", "k":".590%", "l":"2.705%", "m":"4.374%", "n":"2.365%", "o":"6.264%", "p":"2.545%", "q":".173%", "r":"1.653%", "s":"7.755%", "t":"16.671%", "u":"1.487%", "v":".649%", "w":"6.753%", "x":".017%", "y":"1.620%", "z":".034%"}
#data from: http://en.wikipedia.org/wiki/Letter_frequency#Relative_frequencies_of_the_first_letters_of_a_word_in_the_English_language
LETTER = 0
COUNT = 1
#letter_count is a list of lists [letter, count]
def check_each_word(all_letters_file_write, one_letter_file_read):
#get number of lines/words
#get min frequency
#get max frequency
#get total number count
#get average count per word
num_words = 0
min_frequency = 100000
max_frequency = 0
total_frequency = 0
with open(one_letter_file_read) as f_read:
for line in f_read:
#get word frequency
###CHECK THIS REGULAR EXPRESSION###
word_frequency_info = re.match('.*\t([0-9]*)\n', line)
word_frequency_words = word_frequency_info.groups()
word_frequency = int(word_frequency_words[0])
#set stats
num_words += 1
total_frequency += word_frequency
if min_frequency > word_frequency:
min_frequency = word_frequency
if max_frequency < word_frequency:
max_frequency = word_frequency
average_frequency = total_frequency / num_words
#print results
all_letters_file_write.write("num_words: " + str(num_words) + "\n")
all_letters_file_write.write("min_frequency: " + str(min_frequency) + "\n")
all_letters_file_write.write("max_frequency: " + str(max_frequency) + "\n")
all_letters_file_write.write("total_frequency: " + str(total_frequency) + "\n")
all_letters_file_write.write("average_frequency: " + str(average_frequency) + "\n")
return num_words, total_frequency
def get_file_name(letter):
return "combined_one_gram_55Kthresh_" + letter + ".txt"
letter_count = []
total_words = 0
sum_total_frequency = 0
#go through each 1-gram letter's file and collect stats on the word counts for each one
with open("combined_one_gram_55Kthresh_stats.txt", "w") as n_gram_stats:
n_gram_stats.write("Stats on words starting with each letter\n")
for letter in string.lowercase:
n_gram_stats.write(letter + ":\n")
num_words, frequency = check_each_word(n_gram_stats, get_file_name(letter)) #checks new_one_gram_[letter].txt data
n_gram_stats.write("\n")
letter_count.append([letter, num_words])
total_words += num_words
sum_total_frequency += frequency
total_words += 0.0 #turn into double for future calculations. sort of a hack.... :(
#record percent stats to each letter's 1-gram data. (ie, What percentage of the total words begin with this letter?)
n_gram_stats.write("\n\n\n")
n_gram_stats.write("AGGREGATE RESULTS:\n")
for letter_stat in letter_count:
n_gram_stats.write(letter_stat[LETTER] + ":\n")
n_gram_stats.write(" count: " + str(letter_stat[COUNT]) + "\n")
n_gram_stats.write(" prcnt: " + str(letter_stat[COUNT]*100/total_words) + "%\n")
n_gram_stats.write(" want: " + str(actual_letter_frequency[letter_stat[LETTER]]) + "\n")
n_gram_stats.write("\n\n")
n_gram_stats.write("Total Word Count: " + str(total_words) + "\n")
n_gram_stats.write("Average Frequency: " + str(sum_total_frequency/total_words) + "\n")
license: mit | hash: -3,000,366,039,412,939,000 | line_mean: 44.3625 | line_max: 386 | alpha_frac: 0.615766 | autogenerated: false | ratio: 3.157528 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: Affirm/cabot | path: cabot/cabotapp/tasks.py | copies: 1 | size: 11043
import os
from datetime import timedelta
import logging
from celery import Celery
from celery._state import set_default_app
from celery.task import task
from django.core.mail import EmailMessage
from django.core.urlresolvers import reverse
from cabot.cabotapp.models import Schedule, StatusCheckResultTag, StatusCheckResult, Acknowledgement, StatusCheck
from cabot.cabotapp.schedule_validation import update_schedule_problems
from cabot.cabotapp.utils import build_absolute_url
from cabot.celery.celery_queue_config import STATUS_CHECK_TO_QUEUE
from django.conf import settings
from django.utils import timezone
from cabot.cabotapp import models
from cabot.metricsapp.defs import SCHEDULE_PROBLEMS_EMAIL_SNOOZE_HOURS
from cabot.metricsapp.models import MetricsStatusCheckBase
celery = Celery(__name__)
celery.config_from_object(settings)
# Celery should set this app as the default, however the 'celery.current_app'
# api uses threadlocals, so code running in different threads/greenlets uses
# the fallback default instead of this app when no app is specified. This
# causes confusing connection errors when celery tries to connect to a
# non-existent rabbitmq server. It seems to happen mostly when using the
# 'celery.canvas' api. To get around this, we use the internal 'celery._state'
# api to force our app to be the default.
set_default_app(celery)
logger = logging.getLogger(__name__)
def _classify_status_check(pk):
"""
Maps the check to either normal or high priority based on the dict
cabot.celery.celery_queue_config.STATUS_CHECK_TO_QUEUE
"""
check = models.StatusCheck.objects.get(pk=pk)
# If the status check we are running is an instance of MetricsStatusCheckBase
# (i.e. Grafana/Elasticsearch), then StatusCheck.importance is determined by
# the type of failure: If the 'high_alert_value' is set and the check fails,
# the importance is set to ERROR or CRITICAL. However, if this value is null
# or does not fail, and 'warning_value' is not null and fails instead, then
# the importance is set to WARNING. As such, we run all importance levels of
# MetricsStatusCheckBase based on their maximum importance.
if not isinstance(check, MetricsStatusCheckBase):
check_queue = STATUS_CHECK_TO_QUEUE[check.check_category][check.importance]
else:
if check.high_alert_value is None:
check_queue = STATUS_CHECK_TO_QUEUE[check.check_category][models.CheckGroupMixin.WARNING_STATUS]
else:
check_queue = STATUS_CHECK_TO_QUEUE[check.check_category][check.high_alert_importance]
return check_queue
@task(ignore_result=True)
def run_status_check(pk):
check = models.StatusCheck.objects.get(pk=pk)
check.run()
@task(ignore_result=True)
def run_all_checks():
checks = models.StatusCheck.objects.filter(active=True).all()
for check in checks:
if check.should_run():
check_queue = _classify_status_check(check.pk)
run_status_check.apply_async((check.pk,), queue=check_queue, routing_key=check_queue)
@task(ignore_result=True)
def update_services(ignore_result=True):
# Avoid importerrors and the like from legacy scheduling
return
@task(ignore_result=True)
def update_service(service_or_id):
if not isinstance(service_or_id, models.Service):
service = models.Service.objects.get(id=service_or_id)
else:
service = service_or_id
service.update_status()
@task(ignore_result=True)
def update_all_services():
services = models.Service.objects.filter(alerts_enabled=True)
for service in services:
update_service.apply_async((service.id,))
@task(ignore_result=True)
def update_shifts_and_problems():
schedules = models.Schedule.objects.all()
for schedule in schedules:
update_shift_and_problems.apply_async((schedule.id,))
@task(ignore_result=True)
def update_shift_and_problems(schedule_id):
schedule = models.Schedule.objects.get(id=schedule_id)
try:
models.update_shifts(schedule)
except Exception:
logger.exception('Error when updating shifts for schedule %s.', schedule.name)
try:
update_schedule_problems(schedule) # must happen after update_shifts()
except Exception:
logger.exception('Error when updating schedule problems for schedule %s.', schedule.name)
# if there are any problems, queue an email to go out
if schedule.has_problems() and not schedule.problems.is_silenced():
send_schedule_problems_email.apply_async((schedule.pk,))
@task(ignore_result=True)
def reset_shifts_and_problems(schedule_id):
"""
Update shifts & problems for a schedule, called by the Schedule post_save signal handler.
Does not send schedule problems warning emails.
"""
try:
schedule = models.Schedule.objects.get(id=schedule_id)
except Schedule.DoesNotExist:
# this can happen if the schedule got deleted after this task was scheduled but before it started to run
return
try:
models.update_shifts(schedule)
except Exception:
logger.exception('Error when resetting shifts for schedule %s.', schedule.name)
try:
update_schedule_problems(schedule)
except Exception:
logger.exception('Error updating schedule problems list for schedule %s.', schedule.name)
@task(ignore_result=True)
def clean_db(days_to_retain=60):
"""
Clean up database otherwise it gets overwhelmed with StatusCheckResults.
To loop over undeleted results, spawn new tasks to make sure
db connection closed etc
"""
to_discard_results = models.StatusCheckResult.objects.filter(
time__lte=timezone.now()-timedelta(days=days_to_retain))
to_discard_snapshots = models.ServiceStatusSnapshot.objects.filter(
time__lte=timezone.now()-timedelta(days=days_to_retain))
to_discard_acks = models.Acknowledgement.objects.filter(
closed_at__lte=timezone.now()-timedelta(days=days_to_retain))
result_ids = to_discard_results.values_list('id', flat=True)[:100]
snapshot_ids = to_discard_snapshots.values_list('id', flat=True)[:100]
ack_ids = to_discard_acks.values_list('id', flat=True)[:100]
if not result_ids:
logger.info('Completed deleting StatusCheckResult objects')
if not snapshot_ids:
logger.info('Completed deleting ServiceStatusSnapshot objects')
if not ack_ids:
logger.info('Completed deleting Acknowledgement objects')
if (not snapshot_ids) and (not result_ids) and (not ack_ids):
return
logger.info('Processing %s StatusCheckResult objects' % len(result_ids))
logger.info('Processing %s ServiceStatusSnapshot objects' %
len(snapshot_ids))
models.StatusCheckResult.objects.filter(id__in=result_ids).delete()
models.ServiceStatusSnapshot.objects.filter(id__in=snapshot_ids).delete()
models.Acknowledgement.objects.filter(id__in=ack_ids).delete()
clean_db.apply_async(kwargs={'days_to_retain': days_to_retain},
countdown=3)
# because django 1.6 doesn't have send_mail(html_message=...) :|
def _send_mail_html(subject, message, from_email, recipient_list):
msg = EmailMessage(subject, message, from_email, recipient_list)
msg.content_subtype = 'html' # main content type is html
msg.send()
@task(ignore_result=True)
def send_schedule_problems_email(schedule_id):
"""
Send off an email as a celery task
:param schedule_id schedule ID
"""
try:
schedule = models.Schedule.objects.get(pk=schedule_id)
problems = schedule.problems
    except (models.Schedule.DoesNotExist, models.ScheduleProblems.DoesNotExist):
# if the schedule or problems got deleted, nothing to do
logger.info("schedule or problems for pk {} no longer exist, not sending email".format(schedule_id))
return
# check if problems became silenced since the email got queued
if problems.is_silenced():
logger.info("schedule problems became silenced, not sending email")
return
# build the message
# make the schedule link absolute (add domain name) because this is going into an email
cabot_schedule_url = build_absolute_url(schedule.get_edit_url())
# build links to snooze email alerts
snooze_hours = SCHEDULE_PROBLEMS_EMAIL_SNOOZE_HOURS
snooze_links = [build_absolute_url(reverse('snooze-schedule-warnings',
kwargs={'pk': schedule.pk, 'hours': hours})) for hours in snooze_hours]
snoozes = ['<a href="{}">{} hours</a>'.format(link, hours) for link, hours in zip(snooze_links, snooze_hours)]
message = 'The schedule <a href="{}">{}</a> has some issues:\n\n{}\n\n' \
'Click <a href="{}">here</a> to review the schedule\'s configuration.\n' \
'If you don\'t want to deal with this right now, you can silence these alerts for {}.' \
.format(cabot_schedule_url, schedule.name, problems.text, cabot_schedule_url, ' | '.join(snoozes))
message = message.replace('\n', '\n<br/>') # html ignores newlines
# figure out who to send it to (on-call + fallback)
recipients = models.get_duty_officers(schedule) + models.get_fallback_officers(schedule)
recipients = list(set([r.email for r in recipients if r.email])) # get unique emails
# for extra visibility, also log a warning
logger.warn("Sending schedule problems email to {}:\n\n{}".format(recipients, message))
if len(recipients) > 0:
try:
_send_mail_html(subject="Cabot Schedule '{}' Has Problems".format(schedule.name),
message=message,
from_email='Cabot Updates<{}>'.format(os.environ.get('CABOT_FROM_EMAIL')),
recipient_list=recipients)
except Exception as e:
logger.exception('Error sending schedule problems email: {}'.format(e))
@task(ignore_result=True)
def clean_orphaned_tags():
ack_tags = Acknowledgement.tags.through.objects.values('statuscheckresulttag')
result_tags = StatusCheckResult.tags.through.objects.values('statuscheckresulttag')
orphaned_tags = StatusCheckResultTag.objects.exclude(pk__in=ack_tags).exclude(pk__in=result_tags)
logger.info("Deleting {} orphaned tags (out of {} total tags)..."
.format(orphaned_tags.count(), StatusCheckResultTag.objects.count()))
orphaned_tags.delete()
@task(ignore_result=True)
def close_expired_acknowledgements():
now = timezone.now()
# loop over open acks where expire_at >= now
for ack in Acknowledgement.objects.filter(closed_at__isnull=True, expire_at__lte=now):
ack.close('expired')
update_check_and_services.apply_async((ack.status_check_id,))
@task(ignore_result=True)
def update_check_and_services(check_id):
# type: (int) -> None
check = StatusCheck.objects.get(id=check_id)
check.run()
for service in check.service_set.all():
service.update_status()
license: mit | hash: -190,869,898,702,287,970 | line_mean: 39.01087 | line_max: 118 | alpha_frac: 0.700625 | autogenerated: false | ratio: 3.729483 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: lalstef/QuickDatesFormatter | path: quick_dates_formatter.py | copies: 1 | size: 3710
import sublime, sublime_plugin
from datetime import datetime
# Date to be shown as example in the formats list
EXAMPLE_DATE = datetime(1970, 12, 31)
class QuickdatesformatterFormatDatesCommand(sublime_plugin.WindowCommand):
# Needed to find the dates in the chosen format within the text
date_to_regex = {
'%d/%m/%Y': r'\d{1,2}/\d{1,2}/\d{4}',
'%m/%d/%Y': r'\d{1,2}/\d{1,2}/\d{4}',
'%Y/%m/%d': r'\d{4}/\d{1,2}/\d{1,2}',
'%d-%m-%Y': r'\d{1,2}-\d{1,2}-\d{4}',
'%m-%d-%Y': r'\d{1,2}-\d{1,2}-\d{4}',
'%Y-%m-%d': r'\d{4}-\d{1,2}-\d{1,2}',
'%d.%m.%Y': r'\d{1,2}\.\d{1,2}\.\d{4}',
'%m.%d.%Y': r'\d{1,2}\.\d{1,2}\.\d{4}',
'%Y.%m.%d': r'\d{4}\.\d{1,2}\.\d{1,2}'
}
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.settings = None
self.formats = None
self.target_format = None
self.format = None
self.dates = [] # dates in current selection as datetime objects
self.formatted_dates = [] # date strings formatted in the target format
def format_highlighted(self, index):
view = self.window.active_view()
view.sel().clear()
# If quick panel cancelled, clear current state and return.
# ( index of -1 means that the quick panel was cancelled )
if index == -1:
self.dates = []
self.formatted_dates = []
return
# Get the format and the corresponding regex
self.format = self.formats[index][0]
pattern = self.date_to_regex[self.format]
# Add all found dates to the current selection
for region in view.find_all(pattern):
contents = view.substr(view.word(region))
try:
# To check if the regex match really fits the chosen format, we try parsing the string
# Then just add it to the list of dates, not to parse it again later
date_obj = datetime.strptime(contents, self.format)
self.dates.append(date_obj)
# If the string fits the chosen format, then add the region
view.sel().add(region)
except ValueError:
# Nothing to handle here, the string is not in the right format
pass
def format_selected(self, index):
# When format is selected, prompt the user for the desired target format
self.window.show_input_panel(
"Target format",
self.settings.get('target_format'),
self.target_format_selected,
self.target_format_change,
self.target_format_cancelled,
)
def target_format_cancelled(self):
# Clear current selection and formatted_dates list
self.window.active_view().sel().clear()
self.dates = []
self.formatted_dates = []
def target_format_change(self, fmt):
pass
    def target_format_selected(self, fmt):
        """
        Replace selected dates with dates formatted in target format as soon as the target format is input
        """
        # Use the format actually entered in the input panel, falling back to the configured default
        if fmt:
            self.target_format = fmt
        # Run replace_dates TextCommand
        view = self.window.active_view()
        view.run_command('quickdatesformatter_replace_dates', {'formatted_dates':
            [datetime.strftime(date_obj, self.target_format) for date_obj in self.dates]})
def run(self, *args, **kwargs):
self.settings = sublime.load_settings('QuickDatesFormatter.sublime-settings')
self.formats = self.settings.get('formats')
self.target_format = self.settings.get('target_format')
self.window.show_quick_panel(
[[label, datetime.strftime(EXAMPLE_DATE, fmt)] for fmt, label in self.formats],
self.format_selected,
sublime.MONOSPACE_FONT,
0, # menu item index which is highlighted by default
self.format_highlighted
)
class QuickdatesformatterReplaceDatesCommand(sublime_plugin.TextCommand):
def run(self, edit, formatted_dates=None):
regions = self.view.sel()
if formatted_dates and len(formatted_dates) >= len(regions):
for i in range(len(regions)):
self.view.replace(edit, regions[i], formatted_dates[i])
license: gpl-2.0 | hash: -1,456,843,975,556,486,700 | line_mean: 31.831858 | line_max: 100 | alpha_frac: 0.675472 | autogenerated: false | ratio: 3.008921 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: pcournut/deep-learning-for-combinatorial-optimization | path: Google Pointer Net/decoder.py | copies: 1 | size: 5801
import tensorflow as tf
distr = tf.contrib.distributions
# RNN decoder with pointer net for the sequence-to-sequence model.
class pointer_decoder(object):
def __init__(self, encoder_output, cell, temperature, C, inference_mode, initializer):
self.encoder_output = encoder_output # Ref vectors to which attention is pointed: Tensor [Batch size x time steps x cell.state_size]
self.cell = cell # DECODER LSTM Cell
self.batch_size = encoder_output.get_shape()[0] # batch size
self.seq_length = encoder_output.get_shape()[1] # sequence length
self.n_hidden = cell.output_size # num_neurons
# Attending mechanism
with tf.variable_scope("glimpse") as glimpse:
self.W_ref_g =tf.get_variable("W_ref_g",[1,self.n_hidden,self.n_hidden],initializer=initializer)
self.W_q_g =tf.get_variable("W_q_g",[self.n_hidden,self.n_hidden],initializer=initializer)
self.v_g =tf.get_variable("v_g",[self.n_hidden],initializer=initializer)
# Pointing mechanism
with tf.variable_scope("pointer") as pointer:
self.W_ref =tf.get_variable("W_ref",[1,self.n_hidden,self.n_hidden],initializer=initializer)
self.W_q =tf.get_variable("W_q",[self.n_hidden,self.n_hidden],initializer=initializer)
self.v =tf.get_variable("v",[self.n_hidden],initializer=initializer)
self.mask = tf.zeros((self.batch_size,self.seq_length))
self.inference_mode = inference_mode # True for inference / False for training
self.temperature = temperature # temperature parameter
self.C = C # logit clip
self.log_softmax = [] # store log(p_theta(pi(t)|pi(<t),s)) for backprop
self.positions = [] # store visited cities for reward
# From a query (decoder output) [Batch size, n_hidden] and a set of reference (encoder_output) [Batch size, seq_length, n_hidden]
# predict a distribution over next decoder input
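    # Sketch of what the two passes below compute, in Pointer Network notation
    # (symbol names here are illustrative):
    #   glimpse:  g   = sum_j softmax_j( v_g . tanh(W_ref_g e_j + W_q_g q) ) * e_j
    #   pointer:  u_j = v . tanh(W_ref e_j + W_q g)
    #             p   = softmax(u)   # distribution over the input positions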
def attention(self,ref,query,temperature):
encoded_ref_g = tf.nn.conv1d(ref, self.W_ref_g, 1, "VALID", name="encoded_ref_g") # [Batch size, seq_length, n_hidden]
encoded_query_g = tf.expand_dims(tf.matmul(query, self.W_q_g, name="encoded_query_g"), 1) # [Batch size, 1, n_hidden]
scores_g = tf.reduce_sum(self.v_g * tf.tanh(encoded_ref_g + encoded_query_g), [-1], name="scores_g") # [Batch size, seq_length]
attention_g = tf.nn.softmax(scores_g,name="attention_g")
# 1 Glimpse = Linear combination of ref weighted by attention mask (or mask) = pointing mechanism query #########################################
glimpse = tf.multiply(ref, tf.expand_dims(attention_g,2))
glimpse = tf.reduce_sum(glimpse,1)
encoded_ref = tf.nn.conv1d(ref, self.W_ref, 1, "VALID", name="encoded_ref") # [Batch size, seq_length, n_hidden]
encoded_query = tf.expand_dims(tf.matmul(glimpse, self.W_q, name="encoded_query"), 1) # [Batch size, 1, n_hidden]
scores = tf.reduce_sum(self.v * tf.tanh(encoded_ref + encoded_query), [-1], name="scores") # [Batch size, seq_length]
attention = tf.nn.softmax(scores,name="attention") # [Batch size, Seq_length]
"""
if self.inference_mode == True:
attention = tf.nn.softmax(scores/temperature, name="attention") # [Batch size, Seq_length]
else:
attention = tf.nn.softmax(self.C*tf.tanh(scores), name="attention") # [Batch size, Seq_length]
"""
return attention, scores
# One pass of the decode mechanism
def decode(self,prev_state,prev_input,timestep):
with tf.variable_scope("loop"):
if timestep > 0:
tf.get_variable_scope().reuse_variables()
# Run the cell on a combination of the previous input and state
output,state=self.cell(prev_input,prev_state)
# Attention mechanism
distribution, scores=self.attention(self.encoder_output,output,self.temperature)
# Apply attention mask
masked_scores = scores - 100000000.*self.mask # [Batch size, seq_length]
# Multinomial distribution
prob = distr.Categorical(masked_scores)
# Sample from distribution
position = prob.sample()
position = tf.cast(position,tf.int32)
self.positions.append(position)
# Store log_prob for backprop
self.log_softmax.append(prob.log_prob(position))
# Update mask
self.mask = self.mask + tf.one_hot(position, self.seq_length)
# Retrieve decoder's new input
h = tf.transpose(self.encoder_output, [1, 0, 2]) # [Batch size x time steps x cell.state_size] to [time steps x Batch size x cell.state_size]
new_decoder_input = tf.gather(h,position)[0]
return state,new_decoder_input
def loop_decode(self,decoder_initial_state,decoder_first_input):
# decoder_initial_state: Tuple Tensor (c,h) of size [batch_size x cell.state_size]
# decoder_first_input: Tensor [batch_size x cell.state_size]
# Loop the decoding process and collect results
s,i = decoder_initial_state,tf.cast(decoder_first_input,tf.float32)
for step in range(self.seq_length):
s,i = self.decode(s,i,step)
# Stack visited indices
self.positions=tf.stack(self.positions,axis=1)
# Sum log_softmax over output steps
self.log_softmax=tf.add_n(self.log_softmax) #tf.reduce_sum(self.log_softmax,0)
# Return stacked lists of visited_indices and log_softmax for backprop
return self.positions,self.log_softmax
license: mit | hash: -3,904,509,820,302,967,000 | line_mean: 47.177966 | line_max: 153 | alpha_frac: 0.62472 | autogenerated: false | ratio: 3.655325 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: prefetchnta/questlab | path: bin/x64bin/python/37/Lib/fnmatch.py | copies: 1 | size: 4184
"""Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import os
import posixpath
import re
import functools
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = os.path.normcase(name)
pat = os.path.normcase(pat)
return fnmatchcase(name, pat)
@functools.lru_cache(maxsize=256, typed=True)
def _compile_pattern(pat):
if isinstance(pat, bytes):
pat_str = str(pat, 'ISO-8859-1')
res_str = translate(pat_str)
res = bytes(res_str, 'ISO-8859-1')
else:
res = translate(pat)
return re.compile(res).match
def filter(names, pat):
"""Return the subset of the list NAMES that match PAT."""
result = []
pat = os.path.normcase(pat)
match = _compile_pattern(pat)
if os.path is posixpath:
# normcase on posix is NOP. Optimize it away from the loop.
for name in names:
if match(name):
result.append(name)
else:
for name in names:
if match(os.path.normcase(name)):
result.append(name)
return result
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
match = _compile_pattern(pat)
return match(name) is not None
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i+1
if c == '*':
res = res + '.*'
elif c == '?':
res = res + '.'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j+1
if j < n and pat[j] == ']':
j = j+1
while j < n and pat[j] != ']':
j = j+1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j]
if '--' not in stuff:
stuff = stuff.replace('\\', r'\\')
else:
chunks = []
k = i+2 if pat[i] == '!' else i+1
while True:
k = pat.find('-', k, j)
if k < 0:
break
chunks.append(pat[i:k])
i = k+1
k = k+3
chunks.append(pat[i:j])
# Escape backslashes and hyphens for set difference (--).
# Hyphens that create ranges shouldn't be escaped.
stuff = '-'.join(s.replace('\\', r'\\').replace('-', r'\-')
for s in chunks)
# Escape set operations (&&, ~~ and ||).
stuff = re.sub(r'([&~|])', r'\\\1', stuff)
i = j+1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] in ('^', '['):
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return r'(?s:%s)\Z' % res
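# Small illustrative checks, derived from the translate() implementation above:
#   translate('*.txt')             -> r'(?s:.*\.txt)\Z'
#   fnmatch('notes.txt', '*.txt')  -> True
#   fnmatch('notes.txt', '*.py')   -> False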
license: lgpl-2.1 | hash: 1,415,220,731,511,954,200 | line_mean: 30.6875 | line_max: 79 | alpha_frac: 0.487094 | autogenerated: false | ratio: 4.163184 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: anqxyr/pyscp | path: pyscp/orm.py | copies: 1 | size: 5732
#!/usr/bin/env python3
###############################################################################
# Module Imports
###############################################################################
import concurrent.futures
import logging
import peewee
import queue
from itertools import islice
###############################################################################
# Global Constants And Variables
###############################################################################
log = logging.getLogger('pyscp.orm')
pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
queue = queue.Queue()
def queue_execution(fn, args=(), kw={}):
queue.put(dict(fn=fn, args=args, kw=kw))
pool.submit(async_write)
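# Callers keep using the normal peewee API (e.g. Page.create(url=..., html=...));
# the overridden classmethods below push the actual work onto `queue`, and the
# single-worker ThreadPoolExecutor drains it in async_write(), so model writes
# issued this way are serialized on one background thread.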
###############################################################################
# Database ORM Classes
###############################################################################
db = peewee.Proxy()
class BaseModel(peewee.Model):
class Meta:
database = db
@classmethod
def create(cls, **kw):
queue_execution(fn=super().create, kw=kw)
@classmethod
def create_table(cls):
if not hasattr(cls, '_id_cache'):
cls._id_cache = []
queue_execution(fn=super().create_table, args=(True,))
@classmethod
def insert_many(cls, data):
data_iter = iter(data)
chunk = list(islice(data_iter, 500))
while chunk:
queue_execution(
fn=lambda x: super(BaseModel, cls).insert_many(x).execute(),
args=(chunk, ))
chunk = list(islice(data_iter, 500))
@classmethod
def convert_to_id(cls, data, key='user'):
for row in data:
if row[key] not in cls._id_cache:
cls._id_cache.append(row[key])
row[key] = cls._id_cache.index(row[key]) + 1
yield row
@classmethod
def write_ids(cls, field_name):
cls.insert_many([
{'id': cls._id_cache.index(value) + 1, field_name: value}
for value in set(cls._id_cache)])
cls._id_cache.clear()
class ForumCategory(BaseModel):
title = peewee.CharField()
description = peewee.TextField()
class ForumThread(BaseModel):
category = peewee.ForeignKeyField(ForumCategory, null=True)
title = peewee.CharField(null=True)
description = peewee.TextField(null=True)
class Page(BaseModel):
url = peewee.CharField(unique=True)
html = peewee.TextField()
thread = peewee.ForeignKeyField(
ForumThread, related_name='page', null=True)
class User(BaseModel):
name = peewee.CharField(unique=True)
class Revision(BaseModel):
page = peewee.ForeignKeyField(Page, related_name='revisions', index=True)
user = peewee.ForeignKeyField(User, related_name='revisions', index=True)
number = peewee.IntegerField()
time = peewee.DateTimeField()
comment = peewee.CharField(null=True)
class Vote(BaseModel):
page = peewee.ForeignKeyField(Page, related_name='votes', index=True)
user = peewee.ForeignKeyField(User, related_name='votes', index=True)
value = peewee.IntegerField()
class ForumPost(BaseModel):
thread = peewee.ForeignKeyField(
ForumThread, related_name='posts', index=True)
user = peewee.ForeignKeyField(User, related_name='posts', index=True)
parent = peewee.ForeignKeyField('self', null=True)
title = peewee.CharField(null=True)
time = peewee.DateTimeField()
content = peewee.TextField()
class Tag(BaseModel):
name = peewee.CharField(unique=True)
class PageTag(BaseModel):
page = peewee.ForeignKeyField(Page, related_name='tags', index=True)
tag = peewee.ForeignKeyField(Tag, related_name='pages', index=True)
class OverrideType(BaseModel):
name = peewee.CharField(unique=True)
class Override(BaseModel):
url = peewee.ForeignKeyField(Page, to_field=Page.url, index=True)
user = peewee.ForeignKeyField(User, index=True)
type = peewee.ForeignKeyField(OverrideType)
class ImageStatus(BaseModel):
name = peewee.CharField(unique=True)
class Image(BaseModel):
url = peewee.CharField(unique=True)
source = peewee.CharField()
data = peewee.BlobField()
status = peewee.ForeignKeyField(ImageStatus)
notes = peewee.TextField(null=True)
###############################################################################
# Helper Functions
###############################################################################
def async_write(buffer=[]):
item = queue.get()
buffer.append(item)
if len(buffer) > 500 or queue.empty():
log.debug('Processing {} queue items.'.format(len(buffer)))
with db.transaction():
write_buffer(buffer)
buffer.clear()
def write_buffer(buffer):
for item in buffer:
try:
item['fn'](*item.get('args', ()), **item.get('kw', {}))
except:
log.exception(
'Exception while processing queue item: {}'
.format(item))
queue.task_done()
def create_tables(*tables):
for table in tables:
eval(table).create_table()
def connect(dbpath):
log.info('Connecting to the database at {}'.format(dbpath))
db.initialize(peewee.SqliteDatabase(dbpath))
db.connect()
###############################################################################
# Macros
###############################################################################
def votes_by_user(user):
up, down = [], []
for vote in (Vote.select().join(User).where(User.name == user)):
if vote.value == 1:
up.append(vote.page.url)
else:
down.append(vote.page.url)
return {'+': up, '-': down}
license: mit | hash: -6,637,355,382,847,117,000 | line_mean: 27.66 | line_max: 79 | alpha_frac: 0.552512 | autogenerated: false | ratio: 4.030942 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: jpopelka/atomic-reactor | path: atomic_reactor/plugins/exit_sendmail.py | copies: 1 | size: 11470
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from email.mime.text import MIMEText
import os
import smtplib
import socket
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from dockerfile_parse import DockerfileParser
import requests
from atomic_reactor.plugin import ExitPlugin, PluginFailedException
from atomic_reactor.plugins.pre_check_and_set_rebuild import is_rebuild
from atomic_reactor.source import GitSource
class SendMailPlugin(ExitPlugin):
"""This plugins sends notifications about build results.
Example configuration (see arguments for init for detailed explanation):
"exit_plugins": [{
"name": "sendmail",
"args": {
"send_on": ["auto_canceled", "auto_fail"],
"url": "https://openshift-instance.com",
"pdc_url": "https://pdc-instance.com",
# pdc_secret_path is filled in automatically by osbs-client
"pdc_secret_path": "/path/to/file/with/pdc/token",
"smtp_uri": "smtp-server.com",
"from_address": "osbs@mycompany.com",
"error_addresses": ["admin@mycompany.com"],
# optional arguments follow
"submitter": "John Smith <jsmith@mycompany.com>",
"pdc_verify_cert": true,
"pdc_component_df_label": "BZComponent",
"pdc_contact_role": "Devel_Owner"
}
}]
"""
key = "sendmail"
# symbolic constants for states
MANUAL_SUCCESS = 'manual_success'
MANUAL_FAIL = 'manual_fail'
AUTO_SUCCESS = 'auto_success'
AUTO_FAIL = 'auto_fail'
AUTO_CANCELED = 'auto_canceled'
allowed_states = set([MANUAL_SUCCESS, MANUAL_FAIL, AUTO_SUCCESS, AUTO_FAIL, AUTO_CANCELED])
PDC_TOKEN_FILE = 'pdc.token'
PDC_CONTACT_ROLE = 'Devel_Owner'
def __init__(self, tasker, workflow, send_on=None, url=None, submitter='unknown', pdc_url=None,
pdc_verify_cert=True, pdc_component_df_label="BZComponent", pdc_secret_path=None,
pdc_contact_role=None, smtp_uri=None, from_address=None,
error_addresses=None):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:param send_on: list of build states when a notification should be sent
:param url: URL to OSv3 instance where the build logs are stored
:param submitter: name of user who submitted a build (plain string)
:param pdc_url: URL of PDC to query for contact information
:param pdc_verify_cert: whether or not to verify SSL cert of PDC (defaults to True)
:param pdc_component_df_label: name of Dockerfile label to use as PDC global_component
:param pdc_secret_path: path to pdc.token file; $SOURCE_SECRET_PATH otherwise
:param pdc_contact_role: name of PDC role to contact
:param smtp_uri: URL of SMTP server to use to send the message (e.g. "foo.com:25")
:param from_address: the "From" of the notification email
:param error_addresses: list of email addresses where to send an email if there's an error
(e.g. if we can't find out who to notify about the failed build)
"""
super(SendMailPlugin, self).__init__(tasker, workflow)
self.send_on = send_on
self.url = url
self.submitter = submitter
self.pdc_url = pdc_url
self.pdc_verify_cert = pdc_verify_cert
self.pdc_component_df_label = pdc_component_df_label
self.pdc_secret_path = pdc_secret_path
self.pdc_contact_role = pdc_contact_role or self.PDC_CONTACT_ROLE
self.smtp_uri = smtp_uri
self.from_address = from_address
self.error_addresses = error_addresses
def _should_send(self, rebuild, success, canceled):
"""Return True if any state in `self.send_on` meets given conditions, thus meaning
that a notification mail should be sent.
"""
should_send = False
should_send_mapping = {
self.MANUAL_SUCCESS: not rebuild and success,
self.MANUAL_FAIL: not rebuild and not success,
self.AUTO_SUCCESS: rebuild and success,
self.AUTO_FAIL: rebuild and not success,
self.AUTO_CANCELED: rebuild and canceled
}
for state in self.send_on:
should_send |= should_send_mapping[state]
return should_send
def _render_mail(self, rebuild, success, canceled):
"""Render and return subject and body of the mail to send."""
subject_template = 'Image %(image)s; Status %(endstate)s; Submitted by %(user)s'
body_template = '\n'.join([
'Image: %(image)s',
'Status: %(endstate)s',
'Submitted by: %(user)s',
'Logs: %(logs)s',
])
endstate = None
if canceled:
endstate = 'canceled'
else:
endstate = 'successful' if success else 'failed'
url = None
if self.url and self.workflow.openshift_build_selflink:
url = urljoin(self.url, self.workflow.openshift_build_selflink + '/log')
formatting_dict = {
'image': self.workflow.image,
'endstate': endstate,
'user': '<autorebuild>' if rebuild else self.submitter,
'logs': url
}
return (subject_template % formatting_dict, body_template % formatting_dict)
def _get_pdc_token(self):
# we want to allow pdc_secret_path to be None in __init__ - I'm assuming that in future
# we'll want different sources of contact info, so we only want to raise when
# the plugin actually tries to authenticate against PDC and doesn't have pdc_secret_path
if self.pdc_secret_path is None:
raise PluginFailedException('Getting PDC token, but pdc_secret_path is unspecified')
token_file = os.path.join(self.pdc_secret_path, self.PDC_TOKEN_FILE)
self.log.debug('getting PDC token from file %s', token_file)
with open(token_file, 'r') as f:
return f.read().strip()
def _get_component_label(self):
"""Get value of Dockerfile label that is to be used as `global_component` to query
PDC release-components API endpoint.
"""
labels = DockerfileParser(self.workflow.builder.df_path).labels
if self.pdc_component_df_label not in labels:
            raise PluginFailedException('No %s label in Dockerfile, can\'t get PDC component' %
                                        self.pdc_component_df_label)
return labels[self.pdc_component_df_label]
def _get_receivers_list(self):
"""Return list of receivers of the notification.
:raises RuntimeError: if PDC can't be contacted or doesn't provide sufficient data
:raises PluginFailedException: if there's a critical error while getting PDC data
"""
# TODO: document what this plugin expects to be in Dockerfile/where it gets info from
global_component = self._get_component_label()
# this relies on bump_release plugin configuring source.git_commit to actually be
# branch name, not a commit
if not isinstance(self.workflow.source, GitSource):
raise PluginFailedException('Source is not of type "GitSource", panic!')
git_branch = self.workflow.source.git_commit
try:
r = requests.get(urljoin(self.pdc_url, 'rest_api/v1/release-component-contacts/'),
headers={'Authorization': 'Token %s' % self._get_pdc_token()},
params={'global_component': global_component,
'dist_git_branch': git_branch,
'role': self.pdc_contact_role},
verify=self.pdc_verify_cert)
except requests.RequestException as e:
self.log.error('failed to connect to PDC: %s', str(e))
raise RuntimeError(e)
if r.status_code != 200:
self.log.error('PDC returned status code %s, full response: %s',
r.status_code, r.text)
raise RuntimeError('PDC returned non-200 status code (%s), see referenced build log' %
r.status_code)
contacts = r.json()
if contacts['count'] == 0:
self.log.error('no %s role for the component', self.pdc_contact_role)
raise RuntimeError('no %s role for the component' % self.pdc_contact_role)
send_to = []
for contact in contacts['results']:
send_to.append(contact['contact']['email'])
return send_to
def _send_mail(self, receivers_list, subject, body):
"""Actually sends the mail with `subject` and `body` to all members of `receivers_list`."""
msg = MIMEText(body)
msg['Subject'] = subject
msg['From'] = self.from_address
msg['To'] = ', '.join(receivers_list)
s = None
try:
s = smtplib.SMTP(self.smtp_uri)
s.sendmail(self.from_address, receivers_list, msg.as_string())
except (socket.gaierror, smtplib.SMTPException) as e:
raise PluginFailedException('Error communicating with SMTP server: %s' % str(e))
finally:
if s is not None:
s.quit()
def run(self):
# verify that given states are subset of allowed states
unknown_states = set(self.send_on) - self.allowed_states
if len(unknown_states) > 0:
raise PluginFailedException('Unknown state(s) "%s" for sendmail plugin' %
'", "'.join(sorted(unknown_states)))
rebuild = is_rebuild(self.workflow)
success = not self.workflow.build_failed
canceled = self.workflow.autorebuild_canceled
self.log.info('checking conditions for sending notification ...')
if self._should_send(rebuild, success, canceled):
self.log.info('notification about build result will be sent')
subject, body = self._render_mail(rebuild, success, canceled)
try:
self.log.debug('getting list of receivers for this component ...')
receivers = self._get_receivers_list()
except RuntimeError as e:
self.log.error('couldn\'t get list of receivers, sending error message ...')
# TODO: maybe improve the error message/subject
body = '\n'.join([
'Failed to get contact for %s, error: %s' % (str(self.workflow.image), str(e)),
'Since your address is in "error_addresses", this email was sent to you to '
'take action on this.',
'Wanted to send following mail:',
'',
body
])
receivers = self.error_addresses
self.log.info('sending notification to %s ...', receivers)
self._send_mail(receivers, subject, body)
else:
self.log.info('conditions for sending notification not met, doing nothing')
license: bsd-3-clause | hash: 1,448,351,702,517,134,300 | line_mean: 43.285714 | line_max: 99 | alpha_frac: 0.599128 | autogenerated: false | ratio: 4.096429 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: DevynCJohnson/Pybooster | path: pylib/multimedia.py | copies: 1 | size: 14498
#!/usr/bin/env python3
# -*- coding: utf-8; Mode: Python; indent-tabs-mode: nil; tab-width: 4 -*-
# vim: set fileencoding=utf-8 filetype=python syntax=python.doxygen fileformat=unix tabstop=4 expandtab :
# kate: encoding utf-8; bom off; syntax python; indent-mode python; eol unix; replace-tabs off; indent-width 4; tab-width 4; remove-trailing-space on;
"""@brief Library for multimedia manipulation.
@file multimedia.py
@package pybooster.multimedia
@version 2019.07.14
@author Devyn Collier Johnson <DevynCJohnson@Gmail.com>
@copyright LGPLv3
@section HELPFUL DOCUMENTATION
FFmpeg
- FFmpeg Codecs: https://ffmpeg.org/ffmpeg-codecs.html
- FFmpeg Filters: https://ffmpeg.org/ffmpeg-filters.html
@section LICENSE
GNU Lesser General Public License v3
Copyright (c) Devyn Collier Johnson, All rights reserved.
This software is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software.
"""
from array import array
from multiprocessing import cpu_count
import shlex
from subprocess import PIPE, Popen # nosec
from tempfile import NamedTemporaryFile
import wave
from pybooster.fs import doesfileexist, getfilename
from pybooster.iterables import mergeoddeven
from pybooster.system import is_program_aval
PYGAME_IMPORTED: bool = False
try:
from pygame.mixer import init, music
PYGAME_IMPORTED = True
except ImportError:
pass
__all__: list = [
# GLOBAL CONSTANTS #
r'FFMPEG',
# AUDIO #
r'merge2rawwav',
r'openwavfile',
r'openmp3file',
r'writewavfile',
r'playmusic',
r'audioimg2mp4',
# AUDIO CONVERSIONS #
r'to_aac',
r'to_ac3',
r'to_ac3_fixed',
r'to_flac',
r'to_mp3',
r'to_ogg',
r'to_opus',
r'to_wav_tmp',
r'to_wav',
r'mp3_to_wav',
r'wav_to_mp3'
]
# GLOBAL CONSTANTS #
FFMPEG: str = r'ffmpeg -y -hide_banner -loglevel panic -sn -vn'
# AUDIO #
def merge2rawwav(_wav_data: dict) -> bytes:
"""Merge the split WAV channels back together and convert the data to the original raw WAV format."""
if _wav_data[r'num_channels'] == 2:
return mergeoddeven(_wav_data[r'left_audio'], _wav_data[r'right_audio']).tobytes()
return _wav_data[r'data'].tobytes()
def openwavfile(_filename: str) -> dict:
"""Get the contents of the specified WAV file and return the data as a list of integers in a dictionary describing the data."""
_wav_data: list = []
with wave.open(_filename, mode=r'rb') as _file:
_wav_data.append(_file.readframes(_file.getnframes()))
_out: dict = {
r'num_frames': _file.getnframes(),
r'frame_rate': _file.getframerate(),
r'num_channels': _file.getnchannels(),
r'sample_width': _file.getsampwidth()
}
if _out[r'sample_width'] == 1: # 8-bit
_out[r'data'] = array(r'b', _wav_data[0])
elif _out[r'sample_width'] == 2: # 16-bit
_out[r'data'] = array(r'h', _wav_data[0])
elif _out[r'sample_width'] == 4: # 32-bit
_out[r'data'] = array(r'l', _wav_data[0])
if _out[r'num_channels'] == 2:
_out[r'left_audio'] = _out[r'data'][0::2]
_out[r'right_audio'] = _out[r'data'][1::2]
return _out
def openmp3file(_filename: str) -> dict:
"""Get the contents of the specified MP3 file and return the data as a list of integers in a dictionary describing the data."""
_wav_data: list = []
_tmpfile = NamedTemporaryFile()
if not to_wav_tmp(_filename, _tmpfile.name):
raise Exception(r'Failed to convert MP3 file to WAV!')
with wave.open(_tmpfile.name, mode=r'rb') as _file:
_wav_data.append(_file.readframes(_file.getnframes()))
_out: dict = {
r'num_frames': _file.getnframes(),
r'frame_rate': _file.getframerate(),
r'num_channels': _file.getnchannels(),
r'sample_width': _file.getsampwidth()
}
if _out[r'sample_width'] == 1: # 8-bit
_out[r'data'] = array(r'b', _wav_data[0])
elif _out[r'sample_width'] == 2: # 16-bit
_out[r'data'] = array(r'h', _wav_data[0])
elif _out[r'sample_width'] == 4: # 32-bit
_out[r'data'] = array(r'l', _wav_data[0])
if _out[r'num_channels'] == 2:
_out[r'left_audio'] = _out[r'data'][0::2]
_out[r'right_audio'] = _out[r'data'][1::2]
return _out
def writewavfile(_wav_data: dict, _filename: str) -> None:
"""Write a WAV file using data in the given WAV data dictionary."""
with wave.open(_filename, mode=r'wb') as _file:
_file.setparams((_wav_data[r'num_channels'], _wav_data[r'sample_width'], _wav_data[r'frame_rate'], _wav_data[r'num_frames'], r'NONE', r'not compressed')) # pylint: disable=E1101
_file.writeframes(merge2rawwav(_wav_data)) # pylint: disable=E1101
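# Brief usage sketch of the WAV helpers above (file names are hypothetical):
#   _data = openwavfile('input.wav')       # dict with 'data', 'frame_rate', 'num_channels', ...
#   writewavfile(_data, 'roundtrip.wav')   # re-merges the channels and writes the samples back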
def playmusic(_filename: str) -> None: # noqa: R701
"""Play an MP3, WAV, or other audio files."""
if PYGAME_IMPORTED:
init()
music.load(_filename)
music.play()
while music.get_busy() is True:
continue
elif is_program_aval(r'ffplay'):
_process = Popen(shlex.split(r'ffplay -hide_banner -loglevel panic -sn -vn -nodisp ' + _filename), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
elif is_program_aval(r'play'):
_process = Popen(shlex.split(r'play -q -V1 ' + _filename), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
elif _filename.endswith(r'.mp3') and is_program_aval(r'mpeg321'):
_process = Popen(shlex.split(r'mpg321 --quiet ' + _filename), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
elif _filename.endswith(r'.mp3') and is_program_aval(r'mpg123'):
_process = Popen(shlex.split(r'mpg123 --quiet ' + _filename), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
elif _filename.endswith(r'.ogg') and is_program_aval(r'ogg123'):
_process = Popen(shlex.split(r'ogg123 --quiet ' + _filename), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
def audioimg2mp4(_audio_filename: str, _img_filename: str, sample_rate: int = 44100) -> bool:
"""Create an MP4 video given an audio file & an image file; Return True if successful."""
if doesfileexist(_img_filename) and doesfileexist(_audio_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
core_count = r'-threads ' + str(cpu_count())
hw_params = r'-hwaccel cuvid ' + core_count if is_program_aval(r'nvidia-smi') else r'-hwaccel vaapi ' + core_count
_process = Popen(shlex.split(r'ffmpeg -y -hide_banner -loglevel panic ' + hw_params + r' -thread_queue_size 4096 -probesize 20M -analyzeduration 20M -i ' + _img_filename + r' -i ' + _audio_filename + r' -c:v libx264 -crf 15 -tune stillimage -vf scale=2560:1440 -c:a libmp3lame -b:a 320000 -ar ' + str(sample_rate) + r' -compression_level 0 ' + getfilename(_audio_filename) + r'_merged.mp4'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
# AUDIO CONVERSIONS #
def to_aac(_filename: str, sample_rate: int = 44100) -> bool:
"""Convert an audio file to an AAC file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a libfaac -ar ' + str(sample_rate) + r' -f aac ' + getfilename(_filename) + r'.aac'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_ac3(_filename: str, sample_rate: int = 44100) -> bool:
"""Convert an audio file to an AC3 file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a ac3 -ar ' + str(sample_rate) + r' -f ac3 ' + getfilename(_filename) + r'.ac3'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_ac3_fixed(_filename: str, sample_rate: int = 44100) -> bool:
"""Convert an audio file to an AC3 (Fixed) file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a ac3_fixed -ar ' + str(sample_rate) + r' -f ac3 ' + getfilename(_filename) + r'.ac3'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_flac(_filename: str, sample_rate: int = 44100) -> bool:
"""Convert an audio file to a Flac file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a flac -ar ' + str(sample_rate) + r' -compression_level 12 -f flac ' + getfilename(_filename) + r'.flac'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_mp3(_filename: str, bitrate: int = 320000, sample_rate: int = 44100) -> bool:
"""Convert an audio file to an MP3 file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a libmp3lame -b:a ' + str(bitrate) + r' -ar ' + str(sample_rate) + r' -compression_level 0 -f mp3 ' + getfilename(_filename) + r'.mp3'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_ogg(_filename: str, sample_rate: int = 44100) -> bool:
"""Convert an audio file to an OGG file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a libvorbis -ar ' + str(sample_rate) + r' -f ogg ' + getfilename(_filename) + r'.ogg'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_opus(_filename: str) -> bool:
"""Convert an audio file to an OPUS file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a libopus -compression_level 10 -f opus ' + getfilename(_filename) + r'.opus'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_wav_tmp(_filename: str, _tmpname: str, sample_rate: int = 44100) -> bool:
"""Convert an audio file to a WAV file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a pcm_s16le -ar ' + str(sample_rate) + r' -f wav ' + _tmpname), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def to_wav(_filename: str, sample_rate: int = 44100) -> bool:
"""Convert an audio file to a WAV file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
_process = Popen(shlex.split(FFMPEG + r' -i ' + _filename + r' -codec:a pcm_s16le -ar ' + str(sample_rate) + r' -f wav ' + getfilename(_filename) + r'.wav'), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def mp3_to_wav(_filename: str, sample_rate: int = 44100) -> bool:
"""Convert an MP3 file to a WAV file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
return to_wav(_filename, sample_rate)
        if is_program_aval(r'mpg321'):
_process = Popen(shlex.split(r'mpg321 --quiet --stereo --wav ' + _filename + r' ' + _filename.replace(r'.mp3', r'.wav')), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
elif is_program_aval(r'mpg123'):
_process = Popen(shlex.split(r'mpg123 --quiet --stereo --wav ' + _filename + r' ' + _filename.replace(r'.mp3', r'.wav')), stdout=PIPE, stderr=PIPE)
_stdout, _stderr = _process.communicate()
else:
return False
# Check for success
if not _stderr:
return True
return False
def wav_to_mp3(_filename: str, bitrate: int = 320000, sample_rate: int = 44100) -> bool:
"""Convert a WAV file to an MP3 file; Return True if successful."""
if doesfileexist(_filename):
# Conversion
if is_program_aval(r'ffmpeg'):
return to_mp3(_filename, bitrate, sample_rate)
return False
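# Illustrative usage sketch (added for clarity, not part of the original module).
# It assumes ffmpeg is installed and relies on the helper functions used above
# (doesfileexist, is_program_aval, getfilename) being defined earlier in this
# file; the input path below is hypothetical.
if __name__ == '__main__':
    _demo_file = r'/tmp/example.wav'
    if to_mp3(_demo_file):
        print(r'Wrote ' + getfilename(_demo_file) + r'.mp3')
    else:
        print(r'Conversion skipped (missing input file or ffmpeg)')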
|
lgpl-3.0
| -5,862,337,393,570,860,000
| 39.272222
| 429
| 0.618016
| false
| 3.346722
| false
| false
| false
|
gbenson/i8c
|
tests/test_compiler_driver.py
|
1
|
4941
|
# -*- coding: utf-8 -*-
# Copyright (C) 2015-16 Red Hat, Inc.
# This file is part of the Infinity Note Compiler.
#
# The Infinity Note Compiler is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# The Infinity Note Compiler is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the Infinity Note Compiler. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from tests import TestCase
from i8c.compiler import I8CError
from i8c.compiler.driver import main
import os
import subprocess
import sys
import tempfile
SOURCE = """\
define test::func
return
"""
class TestCompilerDriver(TestCase):
"""Test i8c.compiler.driver.main.
    This testcase should be the bare minimum to exercise the function
i8c.compiler.driver.main and its helper functions. Command line
processing tests should be in test_commandline_processor.py, and
tests exercising the compiler generally (i8c.compiler.compile)
should be in their own files.
"""
def setUp(self):
# Set up a working directory
self.workdir = tempfile.mkdtemp()
self.filebase = os.path.join(self.workdir, "test")
self.infile = self.filebase + ".i8"
with open(self.infile, "w") as fp:
fp.write(SOURCE)
# Pipe stderr to a file
tmpfile = os.path.join(self.workdir, "stderr")
self.stderr_fd = os.open(tmpfile,
os.O_RDWR | os.O_CREAT | os.O_EXCL,
0o600)
sys.stderr.flush()
self.saved_stderr_fd = os.dup(2)
os.dup2(self.stderr_fd, 2)
def tearDown(self):
# Restore stderr
sys.stderr.flush()
os.dup2(self.saved_stderr_fd, 2)
os.close(self.saved_stderr_fd)
os.close(self.stderr_fd)
# Delete the working directory
os.chmod(self.workdir, 0o700)
subprocess.call(("rm", "-rf", self.workdir))
# Test all specifiable permutations of (with_cpp,with_i8c,with_asm)
def __run_permtest(self, args, outext):
self.outfile = self.filebase + outext
if "-E" in args:
args.extend(("-o", self.outfile))
args.append(self.infile)
self.assertFalse(os.path.exists(self.outfile))
status = main(args)
self.assertIs(status, None)
self.assertTrue(os.path.isfile(self.outfile))
junk = os.path.join(self.workdir, "-.o")
self.assertFalse(os.path.exists(junk))
def test_do_nothing(self):
"""Check that -E -fpreprocessed is rejected."""
self.assertRaises(I8CError, main, ["-E", "-fpreprocessed"])
def test_pp_to_asm(self):
"""Check that preprocessed source to assembly works."""
self.__run_permtest(["-S", "-fpreprocessed"], ".S")
def test_pp_to_obj(self):
"""Check that preprocessed source to object code works."""
self.__run_permtest(["-fpreprocessed", "-c"], ".o")
def test_i8_to_pp(self):
"""Check that i8 source to preprocessed source works."""
self.__run_permtest(["-E"], ".i8p")
def test_i8_to_asm(self):
"""Check that i8 source to assembly works."""
self.__run_permtest(["-S"], ".S")
def test_i8_to_obj(self):
"""Check that i8 source to object code works."""
self.__run_permtest(["-c"], ".o")
# Test that GCC errors are handled correctly
def __run_failtest(self):
status = main(["-c", self.infile])
self.assertIsNot(status, None)
size = os.lseek(self.stderr_fd, 0, 1)
os.lseek(self.stderr_fd, 0, 0)
output = os.read(self.stderr_fd, size).decode("utf-8")
self.assertGreaterEqual(output.find("error:"), 0)
def test_cpp_failure(self):
"""Check that preprocessor errors are handled correctly."""
os.unlink(self.infile)
self.__run_failtest()
def test_asm_failure(self):
"""Check that assembler errors are handled correctly."""
os.chmod(self.workdir, 0o500)
self.__run_failtest()
# Test that multiple input files with no output file is caught
def test_multi_input_no_output(self):
"""Check that unguessable output filenames are handled."""
infile2 = os.path.join(self.workdir, "test2.i8")
open(infile2, "w")
self.assertRaises(I8CError,
self.__run_permtest, ["-c", infile2], ".o")
|
lgpl-2.1
| -4,894,711,709,997,495,000
| 34.804348
| 71
| 0.632463
| false
| 3.701124
| true
| false
| false
|
sequana/sequana
|
sequana/genbank.py
|
1
|
5965
|
# -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,
# <d.desvillechabrol@gmail.com>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
import re
from sequana.annotations import Annotation
from sequana.fasta import FastA
import colorlog
logger = colorlog.getLogger(__name__)
__all__ = ["GenBank"]
# TODO: we should factorise gff and genbank in a parent class (Annotation)
class GenBank(Annotation):
"""
::
        gg = GenBank("input.gbk")
gg.get_types()
"""
def __init__(self, filename):
super(GenBank, self).__init__(filename)
def get_types(self):
records = self.genbank_features_parser()
_types = set()
for contig in records.keys():
for feature in records[contig]:
_type = feature['type']
_types.add(_type)
return sorted(_types)
def extract_fasta(self, fastafile, features=['rRNA']):
types = self.get_types()
for feature in features:
if feature not in types:
raise ValueError("{} not found".format(feature))
        # the FASTA file may have several contig/chromosome names;
        # the GenBank file should use compatible names
fasta = FastA(fastafile)
contig_names = fasta.get_lengths_as_dict()
        # most of the time, the version suffix is not in the gbk
contig_names = [x.split(".")[0] for x in contig_names]
# then we read the features from the genbank
records = self.genbank_features_parser()
contig_names_gbk = list(records.keys())
# FIXME FastA is not very efficient for eukaryotes but is enough for now
output = ""
for name in records.keys():
if name not in contig_names:
logger.warning("{} contig from genbank not found in fasta".format(name))
continue
index = contig_names.index(name)
sequence = fasta.sequences[index]
for item in records[name]:
if item['type'] in features:
start, end = item['gene_start'], item['gene_end']
try:
info = item['product']
output += ">{}_{}_{}_{} {}\n".format(name, item['type'],
start,end, info)
                    except KeyError:
                        output += ">{}_{}_{}_{}\n".format(name, item['type'], start, end)
output+= "{}\n".format(sequence[start:end])
return output
def genbank_features_parser(self):
""" Return dictionary with features contains inside a genbank file.
:param str input_filename: genbank formated file
"""
new_feature = {}
records = {}
feature_list = []
feature_field = False
with open(self.filename, "r") as fp:
for line in fp:
# pass header and sequence fields
if not feature_field:
# get contig/chrom name
if line.startswith("LOCUS"):
name = line.split()[1]
elif line.startswith("FEATURE"):
feature_field = True
else:
# if feature field is finished
if line.startswith("ORIGIN"):
feature_field = False
records[name] = feature_list
feature_list = []
new_feature = []
continue
                # if there is a word in the qualifier indent (i.e. a feature type)
                # maybe we need to infer the size of the indentation ???
if line[0:20].split():
if new_feature:
feature_list.append(new_feature)
split_line = line.split()
t = split_line[0]
# Handle :
#complement(order(1449596..1449640,1449647..1450684,
#1450695..1450700))
positions = split_line[1]
if positions[0].isalpha():
while not line[:-1].endswith(")"):
line = next(fp)
positions += line
pos = [int(n) for n in re.findall(r"\d+", positions)]
# Handle complement(join(3773333..3774355,3774357..3774431))
start = pos[0]
end = pos[-1]
strand = "-" if split_line[1].startswith("c") else "+"
new_feature = {"type": t, "gene_start": start,
"gene_end": end, "strand": strand}
# recover qualifier bound with feature
else:
quali_line = line.strip().replace('"', '')
if quali_line.startswith("/") and "=" in quali_line:
qualifier = quali_line.split("=")
key = qualifier[0][1:]
new_feature[key] = qualifier[1]
else:
if key == "translation":
new_feature[key] += quali_line
else:
new_feature[key] += " " + quali_line
return records
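# Illustrative usage sketch (added for clarity, not part of the original module);
# the file names below are hypothetical and must exist for the calls to work.
#
#   gb = GenBank("sample.gbk")
#   print(gb.get_types())                                  # e.g. ['CDS', 'gene', 'rRNA']
#   rrna_fasta = gb.extract_fasta("sample.fasta", features=["rRNA"])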
|
bsd-3-clause
| -5,432,652,271,706,455,000
| 36.753165
| 92
| 0.474099
| false
| 4.570881
| false
| false
| false
|
jplu/fasttext-for-linking
|
fasttext_app.py
|
1
|
2105
|
import fasttext
import gensim
import numpy
import flask
import spacy
import argparse
import os
import operator
import collections
app = flask.Flask(__name__)
def file_exists(x):
if not os.path.isfile(x):
        raise argparse.ArgumentTypeError("{0} is not a file".format(x))
return x
def init(args):
global model
global nlp
model = fasttext.load_model(args.model)
nlp = spacy.load(args.language)
@app.route('/fasttext', methods=['POST'])
def fasttext_sim():
    if not flask.request.json or 'entities' not in flask.request.json or 'text' not in flask.request.json or 'mention' not in flask.request.json:
flask.abort(400)
scores = {}
clean_text = [token.orth_ for token in nlp(flask.request.json['text']) if not (token.is_punct or token.is_stop or token.is_space or token.orth_ == flask.request.json['mention'])]
for entity in flask.request.json['entities']:
clean_entity = [token.orth_ for token in nlp(entity) if not (token.is_punct or token.is_stop or token.is_space)]
v1 = model["_".join(clean_entity).lower()]
v2 = [model[word.lower()] for word in clean_text]
if v1 and v2:
scores[entity] = numpy.dot(gensim.matutils.unitvec(numpy.array(v1).mean(axis=0)), gensim.matutils.unitvec(numpy.array(v2).mean(axis=0)))
else:
scores[entity] = 0.0
sorted_scores = collections.OrderedDict(sorted(scores.items(), key=operator.itemgetter(1), reverse=True))
return flask.jsonify(sorted_scores), 200
def main():
parser = argparse.ArgumentParser(description="Webapp for entity linking using fastText in a given language", prog="fasttext_app")
parser.add_argument("-l", "--language", required=True, help="Set the language")
parser.add_argument("-m", "--model", required=True, type=file_exists, help="Set the fastText model")
parser.add_argument("-p", "--port", required=True, type=int, help="Set the port")
parser.add_argument('--version', action='version', version='%(prog)s 1.0.0')
args = parser.parse_args()
init(args)
app.config["JSON_SORT_KEYS"] = False
app.run(host='0.0.0.0', port=args.port)
if __name__ == '__main__':
main()
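# Illustrative client call (added for clarity, not part of the original module);
# the host, port and payload values are hypothetical and assume the server was
# started with a valid fastText model and spaCy language:
#
#   import requests
#   payload = {"text": "Paris is the capital of France",
#              "mention": "Paris",
#              "entities": ["Paris", "Paris Hilton"]}
#   print(requests.post("http://localhost:9000/fasttext", json=payload).json())
#
# The response maps each candidate entity to its cosine similarity with the
# surrounding context, sorted in decreasing order.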
|
apache-2.0
| 5,421,315,234,155,714,000
| 33.508197
| 179
| 0.703563
| false
| 3.100147
| false
| false
| false
|
bwesterb/pol
|
src/blockcipher.py
|
1
|
2756
|
""" Implementation of the block ciphers """
import logging
import pol.serialization
import Crypto.Cipher.AES
import Crypto.Util.Counter
l = logging.getLogger(__name__)
class BlockCipherParameterError(ValueError):
pass
class BaseStream(object):
def encrypt(self, s):
raise NotImplementedError
def decrypt(self, s):
raise NotImplementedError
class BlockCipher(object):
""" Encrypts blocks with a fixed key. """
def __init__(self, params):
""" Initialize the BlockCipher with the given parameters.
NOTE use BlockCipher.setup """
self.params = params
@staticmethod
def setup(params=None):
""" Set-up the blockcipher given by `params`. """
if params is None:
params = {'type': 'aes',
'bits': 256 }
if ('type' not in params or not isinstance(params['type'], basestring)
or params['type'] not in TYPE_MAP):
raise BlockCipherParameterError("Invalid `type' attribute")
return TYPE_MAP[params['type']](params)
@property
def blocksize(self):
""" blocksize in bytes """
raise NotImplementedError
@property
def keysize(self):
""" size of key in bytes """
raise NotImplementedError
def new_stream(self, key, iv, offset=0):
raise NotImplementedError
class _AESStream(BaseStream):
def __init__(self, cipher):
self.cipher = cipher
def encrypt(self, s):
return self.cipher.encrypt(s)
def decrypt(self, s):
return self.cipher.decrypt(s)
class AESBlockCipher(BlockCipher):
""" AES is the default blockcipher """
def __init__(self, params):
super(AESBlockCipher, self).__init__(params)
        if 'bits' not in params or params['bits'] not in (256, ):
            raise BlockCipherParameterError("Invalid param `bits'")
self.bits = params['bits']
def new_stream(self, key, iv, offset=0):
if offset % 16 != 0:
raise ValueError("`offset' should be a multiple of 16")
if len(key) * 8 != self.bits:
raise ValueError("`key' should be %s long" % (self.bits/8))
if len(iv) != 16:
raise ValueError("`iv' should be 16 bytes long")
ctr = Crypto.Util.Counter.new(128,
initial_value=pol.serialization.string_to_number(iv)
+ offset/16)
cipher = Crypto.Cipher.AES.new(key, Crypto.Cipher.AES.MODE_CTR,
counter=ctr)
return _AESStream(cipher)
@property
def blocksize(self):
return 16
@property
def keysize(self):
return self.bits / 8
TYPE_MAP = {'aes': AESBlockCipher}
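# Minimal usage sketch (added for illustration, not part of the original module):
# set up the default AES-256 block cipher and run a CTR-mode encrypt/decrypt
# round trip. The key, IV and plaintext below are dummy demonstration values.
if __name__ == '__main__':
    cipher = BlockCipher.setup({'type': 'aes', 'bits': 256})
    key = b'\x00' * cipher.keysize      # 32 bytes for AES-256
    iv = b'\x01' * cipher.blocksize     # 16 bytes
    ciphertext = cipher.new_stream(key, iv).encrypt(b'sixteen byte msg')
    assert cipher.new_stream(key, iv).decrypt(ciphertext) == b'sixteen byte msg'
    print('AES-256 CTR round trip OK')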
|
gpl-3.0
| 7,931,594,299,527,833,000
| 28.956522
| 78
| 0.592163
| false
| 4.233487
| false
| false
| false
|
a-e/csvsee
|
csvsee/utils.py
|
1
|
15673
|
# utils.py
"""Shared utility functions for the csvsee library.
"""
import csv
import re
import sys
from datetime import datetime, timedelta
from csvsee import dates
class NoMatch (Exception):
"""Exception raised when no column name matches a given expression."""
pass
def float_or_0(value):
"""Try to convert ``value`` to a floating-point number. If
conversion fails, return ``0``.
Examples::
>>> float_or_0(5)
5.0
>>> float_or_0('5')
5.0
>>> float_or_0('five')
0
"""
try:
return float(value)
except ValueError:
return 0
def column_names(csv_file):
"""Return a list of column names in the given ``.csv`` file.
"""
reader = csv.DictReader(open(csv_file, 'r'))
return reader.fieldnames
def strip_prefix(strings):
"""Strip a common prefix from a sequence of strings.
Return ``(prefix, [stripped])`` where ``prefix`` is the string that is
common (with leading and trailing whitespace removed), and ``[stripped]``
is all strings with the prefix removed.
Examples::
>>> strip_prefix(['first', 'fourth', 'fifth'])
('f', ['irst', 'ourth', 'ifth'])
>>> strip_prefix(['spam and eggs', 'spam and potatoes', 'spam and spam'])
('spam and', ['eggs', 'potatoes', 'spam'])
"""
prefix = ''
# Group all first letters, then all second letters, etc.
# letters list will be the same length as the shortest string
for letters in zip(*strings):
# If all letters are the same, append to common prefix
if len(set(letters)) == 1:
prefix += letters[0]
else:
break
# Keep everything after the index where the strings diverge
index = len(prefix)
stripped = [s[index:] for s in strings]
return (prefix.strip(), stripped)
def grep_files(filenames, matches, dateformat='guess', resolution=60,
show_progress=True):
"""Search all the given files for matching text, and return a list of
``(timestamp, counts)`` for each match, where ``timestamp`` is a
``datetime``, and ``counts`` is a dictionary of ``{match: count}``,
counting the number of times each match was found during intervals of
``resolution`` seconds.
"""
# Counts of each match, used as a template for each row
row_temp = [(match, 0) for match in matches]
rows = {}
# Compile regular expressions for matches
# (Shaves off a little bit of execution time)
compiled_matches = [re.compile(expr) for expr in matches]
# Read each line of each file
for filename in filenames:
# Show progress bar?
if show_progress:
num_lines = line_count(filename)
progress = ProgressBar(num_lines, prefix=filename, units='lines')
# No progress bar, just print the filename being read
else:
print("Reading %s" % filename)
# Guess date format?
if not dateformat or dateformat == 'guess':
dateformat = dates.guess_file_date_format(filename)
# HACK: Fake timestamp in case no real timestamps are ever found
timestamp = datetime(1970, 1, 1)
# What line number are we on?
line_num = 0
for line in open(filename, 'r'):
line_num += 1
# Update progress bar every 1000 lines
if show_progress:
if line_num % 1000 == 0 or line_num == num_lines:
progress.update(line_num)
sys.stdout.write('\r' + str(progress))
sys.stdout.flush()
# Remove leading/trailing whitespace and newlines
line = line.strip()
# If line is empty, skip it
if not line:
continue
# See if this line has a timestamp
try:
line_timestamp = dates.date_chop(line, dateformat, resolution)
# No timestamp found, stick with the current one
except dates.CannotParse:
pass
# New timestamp found, switch to it
else:
timestamp = line_timestamp
# If this datestamp hasn't appeared before, add it
if timestamp not in rows:
rows[timestamp] = dict(row_temp)
# Count the number of each match in this line
for expr in compiled_matches:
if expr.search(line):
rows[timestamp][expr.pattern] += 1
# If using progress bar, print a newline
if show_progress:
sys.stdout.write('\n')
    # Return a sorted list of (timestamp, {counts}) tuples
return sorted(rows.iteritems())
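# Illustrative example of the grep_files() return value (added for clarity; the
# log file name and expressions are hypothetical):
#
#   >>> grep_files(['server.log'], [r'ERROR', r'WARN'],
#   ...            dateformat='%Y-%m-%d %H:%M:%S', resolution=60,
#   ...            show_progress=False)  # doctest: +SKIP
#   [(datetime.datetime(2011, 1, 1, 12, 0), {'ERROR': 3, 'WARN': 10}), ...]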
def top_by(func, count, y_columns, y_values, drop=0):
"""Apply ``func`` to each column, and return the top ``count`` column
names. Arguments:
func
A function that takes a list of values and returns a single value.
`max`, `min`, and average are good examples.
count
How many of the "top" values to keep
y_columns
A list of candidate column names. All of these must
exist as keys in ``y_values``
y_values
Dictionary of ``{column: values}`` for each y-column. Must have
data for each column in ``y_columns`` (any extra column data will
be ignored).
drop
How many top values to skip before returning the next
``count`` top columns
"""
# List of (func(ys), y_name)
results = []
for y_name in y_columns:
f_ys = func(y_values[y_name])
results.append((f_ys, y_name))
# Keep the top ``count`` after dropping ``drop`` values
sorted_columns = [y_name for (f_ys, y_name) in reversed(sorted(results))]
return sorted_columns[drop:drop + count]
def top_by_average(count, y_columns, y_values, drop=0):
"""Determine the top ``count`` columns based on the average of values
in ``y_values``, and return the filtered ``y_columns`` names.
"""
def avg(values):
return float(sum(values)) / len(values)
return top_by(avg, count, y_columns, y_values, drop)
def top_by_peak(count, y_columns, y_values, drop=0):
"""Determine the top ``count`` columns based on the peak value
in ``y_values``, and return the filtered ``y_columns`` names.
"""
return top_by(max, count, y_columns, y_values, drop)
def matching_fields(expr, fields):
"""Return all ``fields`` that match a regular expression ``expr``,
or raise a `NoMatch` exception if no matches are found.
Examples::
>>> matching_fields('a.*', ['apple', 'banana', 'avocado'])
['apple', 'avocado']
>>> matching_fields('a.*', ['peach', 'grape', 'kiwi'])
Traceback (most recent call last):
NoMatch: No matching column found for 'a.*'
"""
# Do backslash-escape of expressions
expr = expr.encode('unicode_escape')
# Find matching fields
matches = [field for field in fields if re.match(expr, field)]
# Return matches or raise a NoMatch exception
if matches:
return matches
else:
raise NoMatch("No matching column found for '%s'" % expr)
def matching_xy_fields(x_expr, y_exprs, fieldnames, verbose=False):
"""Match ``x_expr`` and ``y_exprs`` to all available column names in
``fieldnames``, and return the matched ``x_column`` and ``y_columns``.
Example::
>>> matching_xy_fields('x.*', ['y[12]', 'y[ab]'],
... ['xxx', 'y1', 'y2', 'y3', 'ya', 'yb', 'yc'])
('xxx', ['y1', 'y2', 'ya', 'yb'])
If ``x_expr`` is empty, the first column name is used::
>>> matching_xy_fields('', ['y[12]', 'y[ab]'],
... ['xxx', 'y1', 'y2', 'y3', 'ya', 'yb', 'yc'])
('xxx', ['y1', 'y2', 'ya', 'yb'])
If no match is found for any expression in ``y_exprs``, a `NoMatch`
exception is raised::
>>> matching_xy_fields('', ['y[12]', 'y[jk]'],
... ['xxx', 'y1', 'y2', 'y3', 'ya', 'yb', 'yc'])
Traceback (most recent call last):
NoMatch: No matching column found for 'y[jk]'
"""
# Make a copy of fieldnames
fieldnames = [field for field in fieldnames]
# If x_expr is provided, match on that.
if x_expr:
x_column = matching_fields(x_expr, fieldnames)[0]
# Otherwise, just take the first field.
else:
x_column = fieldnames[0]
#print("X-expression: '%s' matched column '%s'" % (x_expr, x_column))
# In any case, remove the x column from fieldnames so it
# won't be matched by any y-expression.
fieldnames.remove(x_column)
# Get all matching Y columns
y_columns = []
for y_expr in y_exprs:
matches = matching_fields(y_expr, fieldnames)
y_columns.extend(matches)
#print("Y-expression: '%s' matched these columns:" % y_expr)
#print('\n'.join(matches))
return (x_column, y_columns)
def read_xy_values(reader, x_column, y_columns,
date_format='', gmt_offset=0, zero_time=False):
"""Read values from a `csv.DictReader`, and return ``(x_values,
y_values)``. where ``x_values`` is a list of values found in ``x_column``,
and ``y_values`` is a dictionary of ``{y_column: [values]}`` for each
column in ``y_columns``.
Arguments:
x_column
Name of the column you want to use as the X axis.
y_columns
Names of columns you want to plot on the Y axis.
date_format
If given, treat values in ``x_column`` as timestamps
with the given format string.
gmt_offset
Add this many hours to every timestamp.
Only useful with ``date_format``.
zero_time
If ``True``, adjust timestamps so the earliest one starts at
``00:00`` (midnight). Only useful with ``date_format``.
"""
x_values = []
y_values = {}
for row in reader:
x_value = row[x_column]
# If X is supposed to be a date, try to convert it
try:
# FIXME: This could do weird things if the x-values
# are sometimes parseable as dates, and sometimes not
x_value = datetime.strptime(x_value, date_format) + \
timedelta(hours=gmt_offset)
# Otherwise, assume it's a floating-point numeric value
except ValueError:
x_value = float_or_0(x_value)
x_values.append(x_value)
# Append Y values from each column
for y_col in y_columns:
if y_col not in y_values:
y_values[y_col] = []
y_values[y_col].append(float_or_0(row[y_col]))
# Adjust datestamps to start at 0:00?
if date_format and zero_time:
z = min(x_values)
hms = timedelta(hours=z.hour, minutes=z.minute, seconds=z.second)
x_values = [x - hms for x in x_values]
return (x_values, y_values)
def line_count(filename):
"""Return the total number of lines in the given file.
"""
# Not terribly efficient but easy and good enough for now
return sum(1 for line in open(filename))
class ProgressBar:
"""An ASCII command-line progress bar with percentage.
Adapted from Corey Goldberg's version:
http://code.google.com/p/corey-projects/source/browse/trunk/python2/progress_bar.py
"""
def __init__(self, end, prefix='', fill='=', units='secs', width=40):
"""Create a progress bar with the given attributes.
"""
self.end = end
self.prog_bar = '[]'
self.prefix = prefix
self.fill = fill
self.units = units
self.width = width
self._update_amount(0)
def _update_amount(self, new_amount):
"""Update the progress bar with the percentage of completion.
"""
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) / 2) - len(str(percent_done))
pct_string = '%i%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def update(self, current):
"""Set the current progress.
"""
self._update_amount((current / float(self.end)) * 100.0)
self.prog_bar += ' %d/%d %s' % (current, self.end, self.units)
def __str__(self):
"""Return the progress bar as a string.
"""
return str(self.prefix + ' ' + self.prog_bar)
def filter_csv(csv_infile, csv_outfile, columns, match='regexp', action='include'):
"""Filter ``csv_infile`` and write output to ``csv_outfile``.
columns
A list of regular expressions or exact column names
match
``regexp`` to treat each value in ``columns`` as a regular
expression, ``exact`` to match exact literal column names
action
``include`` to keep the specified ``columns``, or ``exclude``
to keep all columns *except* the specified ``columns``
"""
# TODO: Factor out a 'filter_columns' function
reader = csv.DictReader(open(csv_infile))
# Do regular-expression matching of column names?
if match == 'regexp':
matching_columns = []
for expr in columns:
# TODO: What if more than one expression matches a column?
# Find a way to avoid duplicates.
matching_columns += matching_fields(expr, reader.fieldnames)
# Exact matching of column names
else:
matching_columns = columns
# Include or exclude?
if action == 'include':
keep_columns = matching_columns
else:
keep_columns = [col for col in reader.fieldnames
if col not in matching_columns]
# Create writer for the columns we're keeping; ignore any extra columns
# passed to the writerow() method.
writer = csv.DictWriter(open(csv_outfile, 'w'), keep_columns,
extrasaction='ignore')
# Write the header (csv.DictWriter doesn't do this for us)
writer.writerow(dict(zip(keep_columns, keep_columns)))
for row in reader:
writer.writerow(row)
def boring_columns(csvfile):
"""Return a list of column names in ``csvfile`` that are "boring"--that is,
the data in them is always the same.
"""
# TODO: Consider columns that never deviate much (less than 1%, say)
# to be boring also
reader = csv.DictReader(open(csvfile))
# Assume all columns are boring until they prove to be interesting
boring = list(reader.fieldnames)
# Remember the first value from each column
prev = reader.next()
for row in reader:
# Check boring columns to see if they have become interesting yet
# (make a copy to prevent problems with popping while iterating)
for col in list(boring):
# If previous value was empty, set prev to current
# (this handles the case where a column is empty for a while,
# then gets a value later). This is not inherently interesting.
if not prev[col].strip():
prev[col] = row[col]
# If the current value is non-empty, and different from the
# previous, then it's interesting
elif row[col].strip() and row[col] != prev[col]:
boring.remove(col)
# Return names of all columns that never became interesting
return boring
|
mit
| 8,186,793,736,834,113,000
| 33.220524
| 90
| 0.588081
| false
| 3.945871
| false
| false
| false
|
jobli/24
|
hour21_hangman.py
|
1
|
3040
|
import pygame
import sys
from random import choice
from pygame.locals import *
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
ORANGE = (255, 100, 0)
PURPLE = (100, 0, 255)
def get_words():
f = open("words.txt")
temp = f.readlines()
words = []
for word in temp:
words.append(word.strip())
return words
def draw_gallows(screen):
pygame.draw.rect(screen, PURPLE, (450, 350, 100, 10)) #bottom
pygame.draw.rect(screen, PURPLE, (495, 250, 10, 100)) #support
pygame.draw.rect(screen, PURPLE, (450, 250, 50, 10)) #crossbar
pygame.draw.rect(screen, PURPLE, (450, 250, 10, 25)) #noose
def draw_man(screen, body_part):
if body_part == "head":
pygame.draw.circle(screen, RED, (455, 270), 10) #head
if body_part == "body":
pygame.draw.line(screen, RED, (455, 280), (455, 320), 3) #body
if body_part == "l_arm":
pygame.draw.line(screen, RED, (455, 300), (445, 285), 3) #arm
if body_part == "r_arm":
pygame.draw.line(screen, RED, (455, 300), (465, 285), 3) #arm
if body_part == "l_leg":
pygame.draw.line(screen, RED, (455, 320), (445, 330), 3) #leg
if body_part == "r_leg":
pygame.draw.line(screen, RED, (455, 320), (465, 330), 3) #leg
def draw_word(screen, spaces):
x = 10
for i in range(spaces):
pygame.draw.line(screen, YELLOW, (x, 350), (x+20, 350), 3)
x += 30
def draw_letter(screen, font, word, guess):
x = 10
for letter in word:
if letter == guess:
letter = font.render(letter, 3, (255,255,255))
screen.blit(letter, (x, 300))
x += 30
def main():
pygame.init()
screen = pygame.display.set_mode((600,400))
font = pygame.font.SysFont("monospace", 30)
draw_gallows(screen)
draw_man(screen, body_part="head")
words = get_words()
word = choice(words)
draw_word(screen, len(word))
pygame.display.update()
body = ["r_leg", "l_leg", "r_arm", "l_arm", "body", "head"]
while body:
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
if event.type == KEYDOWN:
if event.unicode.isalpha():
guess = event.unicode
if guess in word:
draw_letter(screen, font, word, guess)
text = font.render("Grattis !", 1, (0, 255, 0))
screen.blit(text, (40, 40))
pygame.display.update()
else:
body_part = body.pop()
draw_man(screen, body_part)
text = font.render("Synd....", 1, (0, 255, 0))
screen.blit(text, (80, 80))
pygame.display.update()
if __name__ == '__main__':
main()
|
gpl-3.0
| 4,980,498,610,657,509,000
| 27.679245
| 71
| 0.494737
| false
| 3.385301
| false
| false
| false
|
SpaceGroupUCL/qgisSpaceSyntaxToolkit
|
esstoolkit/external/networkx/generators/expanders.py
|
4
|
6194
|
"""Provides explicit constructions of expander graphs.
"""
import itertools
import networkx as nx
__all__ = ["margulis_gabber_galil_graph", "chordal_cycle_graph", "paley_graph"]
# Other discrete torus expanders can be constructed by using the following edge
# sets. For more information, see Chapter 4, "Expander Graphs", in
# "Pseudorandomness", by Salil Vadhan.
#
# For a directed expander, add edges from (x, y) to:
#
# (x, y),
# ((x + 1) % n, y),
# (x, (y + 1) % n),
# (x, (x + y) % n),
# (-y % n, x)
#
# For an undirected expander, add the reverse edges.
#
# Also appearing in the paper of Gabber and Galil:
#
# (x, y),
# (x, (x + y) % n),
# (x, (x + y + 1) % n),
# ((x + y) % n, y),
# ((x + y + 1) % n, y)
#
# and:
#
# (x, y),
# ((x + 2*y) % n, y),
# ((x + (2*y + 1)) % n, y),
# ((x + (2*y + 2)) % n, y),
# (x, (y + 2*x) % n),
# (x, (y + (2*x + 1)) % n),
# (x, (y + (2*x + 2)) % n),
#
def margulis_gabber_galil_graph(n, create_using=None):
r"""Returns the Margulis-Gabber-Galil undirected MultiGraph on `n^2` nodes.
The undirected MultiGraph is regular with degree `8`. Nodes are integer
pairs. The second-largest eigenvalue of the adjacency matrix of the graph
is at most `5 \sqrt{2}`, regardless of `n`.
Parameters
----------
n : int
Determines the number of nodes in the graph: `n^2`.
create_using : NetworkX graph constructor, optional (default MultiGraph)
Graph type to create. If graph instance, then cleared before populated.
Returns
-------
G : graph
The constructed undirected multigraph.
Raises
------
NetworkXError
If the graph is directed or not a multigraph.
"""
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed() or not G.is_multigraph():
msg = "`create_using` must be an undirected multigraph."
raise nx.NetworkXError(msg)
for (x, y) in itertools.product(range(n), repeat=2):
for (u, v) in (
((x + 2 * y) % n, y),
((x + (2 * y + 1)) % n, y),
(x, (y + 2 * x) % n),
(x, (y + (2 * x + 1)) % n),
):
G.add_edge((x, y), (u, v))
G.graph["name"] = f"margulis_gabber_galil_graph({n})"
return G
def chordal_cycle_graph(p, create_using=None):
"""Returns the chordal cycle graph on `p` nodes.
The returned graph is a cycle graph on `p` nodes with chords joining each
vertex `x` to its inverse modulo `p`. This graph is a (mildly explicit)
3-regular expander [1]_.
`p` *must* be a prime number.
Parameters
----------
p : a prime number
The number of vertices in the graph. This also indicates where the
chordal edges in the cycle will be created.
    create_using : NetworkX graph constructor, optional (default=nx.MultiGraph)
Graph type to create. If graph instance, then cleared before populated.
Returns
-------
G : graph
The constructed undirected multigraph.
Raises
------
NetworkXError
If `create_using` indicates directed or not a multigraph.
References
----------
.. [1] Theorem 4.4.2 in A. Lubotzky. "Discrete groups, expanding graphs and
invariant measures", volume 125 of Progress in Mathematics.
Birkhäuser Verlag, Basel, 1994.
"""
G = nx.empty_graph(0, create_using, default=nx.MultiGraph)
if G.is_directed() or not G.is_multigraph():
msg = "`create_using` must be an undirected multigraph."
raise nx.NetworkXError(msg)
for x in range(p):
left = (x - 1) % p
right = (x + 1) % p
# Here we apply Fermat's Little Theorem to compute the multiplicative
# inverse of x in Z/pZ. By Fermat's Little Theorem,
#
# x^p = x (mod p)
#
# Therefore,
#
# x * x^(p - 2) = 1 (mod p)
#
# The number 0 is a special case: we just let its inverse be itself.
chord = pow(x, p - 2, p) if x > 0 else 0
for y in (left, right, chord):
G.add_edge(x, y)
G.graph["name"] = f"chordal_cycle_graph({p})"
return G
def paley_graph(p, create_using=None):
"""Returns the Paley (p-1)/2-regular graph on p nodes.
The returned graph is a graph on Z/pZ with edges between x and y
if and only if x-y is a nonzero square in Z/pZ.
If p = 1 mod 4, -1 is a square in Z/pZ and therefore x-y is a square if and
only if y-x is also a square, i.e the edges in the Paley graph are symmetric.
If p = 3 mod 4, -1 is not a square in Z/pZ and therefore either x-y or y-x
is a square in Z/pZ but not both.
Note that a more general definition of Paley graphs extends this construction
to graphs over q=p^n vertices, by using the finite field F_q instead of Z/pZ.
This construction requires to compute squares in general finite fields and is
not what is implemented here (i.e paley_graph(25) does not return the true
Paley graph associated with 5^2).
Parameters
----------
p : int, an odd prime number.
    create_using : NetworkX graph constructor, optional (default=nx.DiGraph)
Graph type to create. If graph instance, then cleared before populated.
Returns
-------
G : graph
The constructed directed graph.
Raises
------
NetworkXError
If the graph is a multigraph.
References
----------
Chapter 13 in B. Bollobas, Random Graphs. Second edition.
Cambridge Studies in Advanced Mathematics, 73.
Cambridge University Press, Cambridge (2001).
"""
G = nx.empty_graph(0, create_using, default=nx.DiGraph)
if G.is_multigraph():
msg = "`create_using` cannot be a multigraph."
raise nx.NetworkXError(msg)
# Compute the squares in Z/pZ.
# Make it a set to uniquify (there are exactly (p-1)/2 squares in Z/pZ
    # when p is prime).
square_set = {(x ** 2) % p for x in range(1, p) if (x ** 2) % p != 0}
for x in range(p):
for x2 in square_set:
G.add_edge(x, (x + x2) % p)
G.graph["name"] = f"paley({p})"
return G
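# Minimal self-contained demo (added for illustration, not part of the original
# module): build a small instance of each expander family and print basic
# properties; the parameters below are arbitrary small examples.
if __name__ == "__main__":
    G = margulis_gabber_galil_graph(3)  # 8-regular multigraph on 9 nodes
    H = chordal_cycle_graph(7)          # chordal cycle expander on 7 nodes
    P = paley_graph(13)                 # directed Paley graph, out-degree 6
    for graph in (G, H, P):
        print(graph.graph["name"], graph.number_of_nodes(), graph.number_of_edges())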
|
gpl-3.0
| 6,139,182,787,235,020,000
| 29.658416
| 81
| 0.585015
| false
| 3.235632
| false
| false
| false
|
nkmk/python-snippets
|
notebook/opencv_hconcat_vconcat_np_tile.py
|
1
|
2317
|
import cv2
import numpy as np
im1 = cv2.imread('data/src/lena.jpg')
im2 = cv2.imread('data/src/rocket.jpg')
im_v = cv2.vconcat([im1, im1])
cv2.imwrite('data/dst/opencv_vconcat.jpg', im_v)
# True
im_v_np = np.tile(im1, (2, 1, 1))
cv2.imwrite('data/dst/opencv_vconcat_np.jpg', im_v_np)
# True
def vconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):
w_min = min(im.shape[1] for im in im_list)
im_list_resize = [cv2.resize(im, (w_min, int(im.shape[0] * w_min / im.shape[1])), interpolation=interpolation)
for im in im_list]
return cv2.vconcat(im_list_resize)
im_v_resize = vconcat_resize_min([im1, im2, im1])
cv2.imwrite('data/dst/opencv_vconcat_resize.jpg', im_v_resize)
# True
im_h = cv2.hconcat([im1, im1])
cv2.imwrite('data/dst/opencv_hconcat.jpg', im_h)
# True
im_h_np = np.tile(im1, (1, 2, 1))
cv2.imwrite('data/dst/opencv_hconcat_np.jpg', im_h_np)
# True
def hconcat_resize_min(im_list, interpolation=cv2.INTER_CUBIC):
h_min = min(im.shape[0] for im in im_list)
im_list_resize = [cv2.resize(im, (int(im.shape[1] * h_min / im.shape[0]), h_min), interpolation=interpolation)
for im in im_list]
return cv2.hconcat(im_list_resize)
im_h_resize = hconcat_resize_min([im1, im2, im1])
cv2.imwrite('data/dst/opencv_hconcat_resize.jpg', im_h_resize)
# True
def concat_tile(im_list_2d):
return cv2.vconcat([cv2.hconcat(im_list_h) for im_list_h in im_list_2d])
im1_s = cv2.resize(im1, dsize=(0, 0), fx=0.5, fy=0.5)
im_tile = concat_tile([[im1_s, im1_s, im1_s, im1_s],
[im1_s, im1_s, im1_s, im1_s],
[im1_s, im1_s, im1_s, im1_s]])
cv2.imwrite('data/dst/opencv_concat_tile.jpg', im_tile)
# True
im_tile_np = np.tile(im1_s, (3, 4, 1))
cv2.imwrite('data/dst/opencv_concat_tile_np.jpg', im_tile_np)
# True
def concat_tile_resize(im_list_2d, interpolation=cv2.INTER_CUBIC):
im_list_v = [hconcat_resize_min(im_list_h, interpolation=cv2.INTER_CUBIC) for im_list_h in im_list_2d]
return vconcat_resize_min(im_list_v, interpolation=cv2.INTER_CUBIC)
im_tile_resize = concat_tile_resize([[im1],
[im1, im2, im1, im2, im1],
[im1, im2, im1]])
cv2.imwrite('data/dst/opencv_concat_tile_resize.jpg', im_tile_resize)
# True
|
mit
| 5,698,015,391,255,300,000
| 34.646154
| 114
| 0.632283
| false
| 2.361876
| false
| false
| false
|
shootstar/novatest
|
nova/virt/powervm/exception.py
|
1
|
2438
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
class PowerVMConnectionFailed(exception.NovaException):
message = _('Connection to PowerVM manager failed')
class PowerVMFileTransferFailed(exception.NovaException):
message = _("File '%(file_path)s' transfer to PowerVM manager failed")
class PowerVMFTPTransferFailed(PowerVMFileTransferFailed):
message = _("FTP %(ftp_cmd)s from %(source_path)s to %(dest_path)s failed")
class PowerVMLPARInstanceNotFound(exception.InstanceNotFound):
message = _("LPAR instance '%(instance_name)s' could not be found")
class PowerVMLPARCreationFailed(exception.NovaException):
message = _("LPAR instance '%(instance_name)s' creation failed")
class PowerVMNoSpaceLeftOnVolumeGroup(exception.NovaException):
message = _("No space left on any volume group")
class PowerVMLPARAttributeNotFound(exception.NovaException):
pass
class PowerVMLPAROperationTimeout(exception.NovaException):
message = _("Operation '%(operation)s' on "
"LPAR '%(instance_name)s' timed out")
class PowerVMImageCreationFailed(exception.NovaException):
message = _("Image creation failed on PowerVM")
class PowerVMInsufficientFreeMemory(exception.NovaException):
message = _("Insufficient free memory on PowerVM system to spawn instance "
"'%(instance_name)s'")
class PowerVMInsufficientCPU(exception.NovaException):
message = _("Insufficient available CPUs on PowerVM system to spawn "
"instance '%(instance_name)s'")
class PowerVMLPARInstanceCleanupFailed(exception.NovaException):
message = _("PowerVM LPAR instance '%(instance_name)s' cleanup failed")
class PowerVMUnrecognizedRootDevice(exception.NovaException):
message = _("Unrecognized root disk information: '%(disk_info)s'")
|
apache-2.0
| 9,210,342,643,580,613,000
| 32.861111
| 79
| 0.73872
| false
| 4.153322
| false
| false
| false
|
joaduo/python-simplerpc
|
simplerpc/expose_api/javascript/TemplatesCollector.py
|
1
|
1940
|
# -*- coding: utf-8 -*-
'''
Simple RPC
Copyright (c) 2013, Joaquin G. Duo
'''
from simplerpc.base.SimpleRpcLogicBase import SimpleRpcLogicBase
from simplerpc.common.path import joinPath, splitPath
import os
from simplerpc.common.FileManager import FileManager
import fnmatch
class TemplatesCollector(SimpleRpcLogicBase):
'''
    Collects the templates stored in the repository
    so they can be used for translation by the TranslationAstNode class.
'''
def __post_init__(self):
self.file_manager = FileManager(self.context)
def _getRepoPath(self, templates_set):
return joinPath(os.path.dirname(__file__), templates_set)
def _getTemplatesPaths(self, pattern, templates_set):
for root, _, files in os.walk(self._getRepoPath(templates_set),
followlinks=True):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
def _buildNamespace(self, file_path, templates_set):
repo_split = splitPath(self._getRepoPath(templates_set))
namespace, _ = os.path.splitext(file_path)
namespace = splitPath(namespace)[len(repo_split):]
return '.'.join(namespace)
def collectBuiltIn(self, templates_set='javascript_templates'):
templates = dict()
for file_path in self._getTemplatesPaths('*.js', templates_set):
namespace = self._buildNamespace(file_path, templates_set)
template = self.file_manager.getTextFile(file_path)
templates[namespace] = template
return templates
def smokeTestModule():
from simplerpc.context.SimpleRpcContext import SimpleRpcContext
context = SimpleRpcContext('smoke test')
templates = TemplatesCollector(context).collectBuiltIn()
context.log(templates)
if __name__ == "__main__":
smokeTestModule()
|
bsd-3-clause
| -4,199,961,447,003,559,400
| 36.307692
| 72
| 0.665979
| false
| 4.181034
| false
| false
| false
|
mementum/backtrader
|
backtrader/indicators/mabase.py
|
1
|
2719
|
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..utils.py3 import with_metaclass
from . import Indicator
class MovingAverage(object):
'''MovingAverage (alias MovAv)
A placeholder to gather all Moving Average Types in a single place.
Instantiating a SimpleMovingAverage can be achieved as follows::
sma = MovingAverage.Simple(self.data, period)
Or using the shorter aliases::
sma = MovAv.SMA(self.data, period)
or with the full (forwards and backwards) names:
sma = MovAv.SimpleMovingAverage(self.data, period)
sma = MovAv.MovingAverageSimple(self.data, period)
'''
_movavs = []
@classmethod
def register(cls, regcls):
if getattr(regcls, '_notregister', False):
return
cls._movavs.append(regcls)
clsname = regcls.__name__
setattr(cls, clsname, regcls)
clsalias = ''
if clsname.endswith('MovingAverage'):
clsalias = clsname.split('MovingAverage')[0]
elif clsname.startswith('MovingAverage'):
clsalias = clsname.split('MovingAverage')[1]
if clsalias:
setattr(cls, clsalias, regcls)
class MovAv(MovingAverage):
pass # alias
class MetaMovAvBase(Indicator.__class__):
# Register any MovingAverage with the placeholder to allow the automatic
# creation of envelopes and oscillators
def __new__(meta, name, bases, dct):
# Create the class
cls = super(MetaMovAvBase, meta).__new__(meta, name, bases, dct)
MovingAverage.register(cls)
# return the class
return cls
class MovingAverageBase(with_metaclass(MetaMovAvBase, Indicator)):
params = (('period', 30),)
plotinfo = dict(subplot=False)
|
gpl-3.0
| -6,185,863,034,770,103,000
| 28.879121
| 79
| 0.634792
| false
| 4.070359
| false
| false
| false
|
coin-or/oBB
|
obb/gcest.py
|
1
|
2698
|
from __future__ import division
# Use the third order derivative tensor Gershgorin estimation method
def gcest(LT,UT,method):
# Get dimension
D = LT.shape[0]
# # Positivity/ Negativity tensor checks
# from numpy import all
# print('LT non-positive: %s') % all(LT <= 0)
# print('UT non-negative: %s') % all(UT >= 0)
#
# # Ediag check ef
# print('MA non-positive: %s') % all((UT+LT)/2 <= 0)
# print('RA non-negative: %s') % all((UT-LT)/2 >= 0)
# Gershgorin
# mRA = (UT-LT)/2. # radius tensor.
# mMA = (UT+LT)/2. # midpoint tensor
# for i in range(0,D):
# for j in range(0,D):
# for k in range(0,D):
# if((i==j)and(j==k)):
# mRA[i,j,k] = 0
# mMA[i,j,k] = LT[i,j,k]
# print('mMA non-positive: %s') % all(mMA <= 0)
# print('mRA non-negative: %s') % all(mRA >= 0)
#
# # lbH check (equivalent to Gersh like quad?)
# NRA = (LT-UT)/2.
# rs = (NRA.sum(axis=1)).sum(axis=1)
# A = (LT+UT)/2.
# for i in range(0,D):
# for j in range(0,D):
# for k in range(0,D):
# if((i==j)and(j==k)):
# A[i,j,k] = LT[i,j,k] + (rs[i] - NRA[i,j,k])
# print('lbH non-positive: %s') % all(A <= 0)
# Select estimation method (gc, c)
# Gershgorin for Tensors
if(method == 'gc'):
# Imports
from numpy import maximum, zeros
# Calculate max absolute value of bounds
VT = maximum(abs(LT),abs(UT))
# Get row plane sums
rs = (VT.sum(axis=1)).sum(axis=1)
# Tensor diagonal function
def diagt(T):
v = zeros(D)
for i in range(0,D):
v[i] = T[i,i,i]
return v
# Calculate lower bounds on Gershgorin disks
G = diagt(LT) - (rs-diagt(VT))
# Calculate Gershgorin lower bound
k = min(G)
# If k negative ok, if k positive need other bound
if(k < 0):
pass
#print('k ok, negative')
else:
#print('k positive, using other bound.')
k = (D**(-0.5))*k
return k
# Lh = norm_F(VT) so return -Lh
elif(method == 'c'):
# Imports
from numpy import maximum, sqrt, sum
# Calculate max absolute value of bounds
VT = maximum(abs(LT),abs(UT))
# Calculate frobenius norm of VT
return -sqrt(sum(sum(sum(VT ** 2))))
else:
raise RuntimeError('Method must be one of gc, c.')
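# Minimal self-contained check (added for illustration, not part of the original
# module): build random element-wise bounds LT <= UT for a small third-order
# tensor and compare the two estimation methods.
if __name__ == '__main__':
    from numpy import minimum, maximum
    from numpy.random import rand
    D = 4
    A = rand(D, D, D) - 0.5
    B = rand(D, D, D) - 0.5
    LT, UT = minimum(A, B), maximum(A, B)
    print('gc estimate: %s' % gcest(LT, UT, 'gc'))
    print('c estimate: %s' % gcest(LT, UT, 'c'))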
|
lgpl-3.0
| 7,594,793,938,175,760,000
| 29.659091
| 79
| 0.467754
| false
| 3.211905
| false
| false
| false
|
Nander2/pypot_herkulex
|
pypot/sensor/kinect/sensor.py
|
1
|
3754
|
"""
This code has been developed by Baptiste Busch: https://github.com/buschbapti
This module allows you to retrieve Skeleton information from a Kinect device.
It is only the client side of a zmq client/server application.
The server part can be found at: https://bitbucket.org/buschbapti/kinectserver/src
It uses the Microsoft Kinect SDK and thus only works on Windows.
Of course, the client side can be used on any platform.
"""
import zmq
import numpy
import threading
from collections import namedtuple
from ...utils import Point3D, Point2D, Quaternion
torso_joints = ('hip_center', 'spine', 'shoulder_center', 'head')
left_arm_joints = ('shoulder_left', 'elbow_left', 'wrist_left', 'hand_left')
right_arm_joints = ('shoulder_right', 'elbow_right', 'wrist_right', 'hand_right')
left_leg_joints = ('hip_left', 'knee_left', 'ankle_left', 'foot_left')
right_leg_joints = ('hip_right', 'knee_right', 'ankle_right', 'foot_right')
skeleton_joints = torso_joints + left_arm_joints + right_arm_joints + left_leg_joints + right_leg_joints
class Skeleton(namedtuple('Skeleton', ('timestamp', 'user_id') + skeleton_joints)):
joints = skeleton_joints
Joint = namedtuple('Joint', ('position', 'orientation', 'pixel_coordinate'))
class KinectSensor(object):
def __init__(self, addr, port):
self._lock = threading.Lock()
self._skeleton = {}
self.context = zmq.Context()
self.sub_skel = self.context.socket(zmq.SUB)
self.sub_skel.connect('tcp://{}:{}'.format(addr, port))
self.sub_skel.setsockopt(zmq.SUBSCRIBE, '')
t = threading.Thread(target=self.get_skeleton)
t.daemon = True
t.start()
def remove_user(self,user_index):
with self._lock:
del self._skeleton[user_index]
def remove_all_users(self):
with self._lock:
self._skeleton = {}
@property
def tracked_skeleton(self):
with self._lock:
return self._skeleton
@tracked_skeleton.setter
def tracked_skeleton(self, skeleton):
with self._lock:
self._skeleton[skeleton.user_id] = skeleton
def get_skeleton(self):
while True:
md = self.sub_skel.recv_json()
msg = self.sub_skel.recv()
skel_array = numpy.fromstring(msg, dtype=float, sep=",")
skel_array = skel_array.reshape(md['shape'])
nb_joints = md['shape'][0]
joints = []
for i in range(nb_joints):
x, y, z, w = skel_array[i][0:4]
position = Point3D(x / w, y / w, z / w)
pixel_coord = Point2D(*skel_array[i][4:6])
orientation = Quaternion(*skel_array[i][6:10])
joints.append(Joint(position,orientation,pixel_coord))
self.tracked_skeleton = Skeleton(md['timestamp'], md['user_index'], *joints)
def run(self):
cv2.startWindowThread()
while True:
img = numpy.zeros((480, 640, 3))
            skeleton = self.tracked_skeleton
if skeleton:
for user,skel in skeleton.iteritems():
for joint_name in skel.joints:
x, y = getattr(skel, joint_name).pixel_coordinate
pt = (int(x),int(y))
cv2.circle(img, pt, 5, (255, 255, 255), thickness=-1)
                self.remove_all_users()
cv2.imshow('Skeleton', img)
cv2.waitKey(50)
self.sub_skel.close()
self.context.term()
if __name__ == '__main__':
import cv2
kinect = KinectSensor('193.50.110.177', 9999)
kinect.run()
|
gpl-3.0
| 4,152,207,849,554,016,000
| 32.759259
| 104
| 0.579382
| false
| 3.47271
| false
| false
| false
|
legacysurvey/pipeline
|
validationtests/quicksipManera3.py
|
1
|
51044
|
from math import *
import numpy as np
import healpy as hp
import astropy.io.fits as pyfits
import time
import matplotlib.pyplot as plt
from multiprocessing import Pool
from multiprocessing.dummy import Pool as ThreadPool
import numpy.random
import os, errno
import subprocess
twopi = 2.*pi
piover2 = .5*pi
verbose = False
# ---------------------------------------------------------------------------------------- #
def quicksipVerbose(verb=False):
global verbose
verbose=verb
# Make directory
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
# Some unit definitions
arcsec_to_radians = 0.0000048481368111
degree_to_arcsec = 3600.0
# MarcM Global variable to debug
#nwrong = 0
# ---------------------------------------------------------------------------------------- #
# Write partial Healpix map to file
# indices are the indices of the pixels to be written
# values are the values to be written
def write_partial_map(filename, indices, values, nside, nest=False):
fitsformats = [hp.fitsfunc.getformat(np.int32), hp.fitsfunc.getformat(np.float32)]
column_names = ['PIXEL', 'SIGNAL']
# maps must have same length
assert len(set((len(indices), len(values)))) == 1, "Indices and values must have same length"
if nside < 0:
raise ValueError('Invalid healpix map : wrong number of pixels')
firstpix = np.min(indices)
lastpix = np.max(indices)
npix = np.size(indices)
cols=[]
for cn, mm, fm in zip(column_names, [indices, values], fitsformats):
cols.append(pyfits.Column(name=cn, format='%s' % fm, array=mm))
if False: # Deprecated : old way to create table with pyfits before v3.3
tbhdu = pyfits.new_table(cols)
else:
tbhdu = pyfits.BinTableHDU.from_columns(cols)
# add needed keywords
tbhdu.header['PIXTYPE'] = ('HEALPIX','HEALPIX pixelisation')
if nest: ordering = 'NESTED'
else: ordering = 'RING'
tbhdu.header['ORDERING'] = (ordering, 'Pixel ordering scheme, either RING or NESTED')
tbhdu.header['EXTNAME'] = ('xtension', 'name of this binary table extension')
tbhdu.header['NSIDE'] = (nside,'Resolution parameter of HEALPIX')
tbhdu.header['FIRSTPIX'] = (firstpix, 'First pixel # (0 based)')
tbhdu.header['OBS_NPIX'] = npix
tbhdu.header['GRAIN'] = 1
tbhdu.header['OBJECT'] = 'PARTIAL'
tbhdu.header['INDXSCHM'] = ('EXPLICIT', 'Indexing: IMPLICIT or EXPLICIT')
tbhdu.writeto(filename,clobber=True)
subprocess.call("gzip -f "+filename,shell=True)
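# --- Illustrative example (not in the original code) ---
# Minimal usage sketch for write_partial_map: write a handful of RING-ordered
# pixels of an nside=64 map. The file name and values are placeholders.
def _example_write_partial_map():
    indices = np.array([100, 101, 102])
    values = np.array([1.0, 2.5, 0.3])
    write_partial_map('example_partial_map.fits', indices, values, 64, nest=False)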
# ---------------------------------------------------------------------------------------- #
# Find healpix ring number from z
def ring_num(nside, z, shift=0):
# ring = ring_num(nside, z [, shift=])
# returns the ring number in {1, 4*nside-1}
# from the z coordinate
# usually returns the ring closest to the z provided
# if shift = -1, returns the ring immediately north (i.e. of smaller index) of z
# if shift = 1, returns the ring immediately south (i.e. of larger index) of z
my_shift = shift * 0.5
# equatorial
iring = np.round( nside*(2.0 - 1.5*z) + my_shift )
if (z > 2./3.):
iring = np.round( nside * np.sqrt(3.0*(1.0-z)) + my_shift )
if (iring == 0):
iring = 1
# south cap
if (z < -2./3.):
iring = np.round( nside * np.sqrt(3.0*(1.0+z)) - my_shift )
if (iring == 0):
iring = 1
iring = int(4*nside - iring)
# return ring number
return int(iring)
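# --- Illustrative example (not in the original code) ---
# ring_num maps z = cos(theta) to the absolute ring index in {1, ..., 4*nside-1}:
# the equator (z = 0) falls on ring 2*nside and the north pole on ring 1.
def _example_ring_num():
    nside = 64
    assert ring_num(nside, 0.0) == 2 * nside
    assert ring_num(nside, 1.0) == 1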
# ---------------------------------------------------------------------------------------- #
# returns the z coordinate of ring ir for Nside
def ring2z (nside, ir):
fn = float(nside)
if (ir < nside): # north cap
tmp = float(ir)
z = 1.0 - (tmp * tmp) / (3.0 * fn * fn)
elif (ir < 3*nside): # tropical band
z = float( 2*nside-ir ) * 2.0 / (3.0 * fn)
else: # polar cap (south)
tmp = float(4*nside - ir )
z = - 1.0 + (tmp * tmp) / (3.0 * fn * fn)
# return z
return z
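# --- Illustrative example (not in the original code) ---
# ring2z is the inverse mapping: it returns the z of ring ir, and round-trips
# through ring_num for rings away from the cap/equatorial boundaries.
def _example_ring2z():
    nside = 64
    assert ring2z(nside, 2 * nside) == 0.0
    assert ring_num(nside, ring2z(nside, 100)) == 100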
# ---------------------------------------------------------------------------------------- #
def ang2pix_ring_ir(nside,ir,phi):
# c=======================================================================
# c gives the pixel number ipix (RING)
# c corresponding to angles theta and phi
# c=======================================================================
z = ring2z (nside, ir)
z0=2.0/3.0
za = fabs(z)
if phi >= twopi:
phi = phi - twopi
if phi < 0.:
phi = phi + twopi
tt = phi / piover2#;// ! in [0,4)
nl2 = 2*nside
nl4 = 4*nside
ncap = nl2*(nside-1)#// ! number of pixels in the north polar cap
npix = 12*nside*nside
if za <= z0:# {
jp = int(floor(nside*(0.5 + tt - z*0.75)))#; /*index of ascending edge line*/
jm = int(floor(nside*(0.5 + tt + z*0.75)))#; /*index of descending edge line*/
#ir = nside + 1 + jp - jm#;// ! in {1,2n+1} (ring number counted from z=2/3)
kshift = 0
if fmod(ir,2)==0.:
kshift = 1#;// ! kshift=1 if ir even, 0 otherwise
ip = int(floor( ( jp+jm - nside + kshift + 1 ) / 2 ) + 1)#;// ! in {1,4n}
if ip>nl4:
ip = ip - nl4
ipix1 = ncap + nl4*(ir-1) + ip
else:
tp = tt - floor(tt)#;// !MOD(tt,1.d0)
tmp = sqrt( 3.*(1. - za) )
jp = int(floor( nside * tp * tmp ))#;// ! increasing edge line index
jm = int(floor( nside * (1. - tp) * tmp ))#;// ! decreasing edge line index
#ir = jp + jm + 1#;// ! ring number counted from the closest pole
ip = int(floor( tt * ir ) + 1)#;// ! in {1,4*ir}
if ip>4*ir:
ip = ip - 4*ir
ipix1 = 2*ir*(ir-1) + ip
if z<=0.:
ipix1 = npix - 2*ir*(ir+1) + ip
return ipix1 - 1
# gives the list of Healpix pixels contained in [phi_low, phi_hi]
def in_ring_simp(nside, iz, phi_low, phi_hi, conservative=True):
pixmin = int(ang2pix_ring_ir(nside,iz,phi_low))
pixmax = int(ang2pix_ring_ir(nside,iz,phi_hi))
if pixmax < pixmin:
pixmin1 = pixmax
pixmax = pixmin
pixmin = pixmin1
listir = np.arange(pixmin, pixmax)
return listir
# gives the list of Healpix pixels contained in [phi_low, phi_hi]
def in_ring(nside, iz, phi_low, phi_hi, conservative=True):
# nir is the number of pixels found
# if no pixel is found, on exit nir =0 and result = -1
if phi_hi-phi_low == 0:
return -1
npix = hp.nside2npix(nside)
ncap = 2*nside*(nside-1) # number of pixels in the north polar cap
listir = -1
nir = 0
# identifies ring number
if ((iz >= nside) and (iz <= 3*nside)): # equatorial region
ir = iz - nside + 1 # in {1, 2*nside + 1}
ipix1 = ncap + 4*nside*(ir-1) # lowest pixel number in the ring
ipix2 = ipix1 + 4*nside - 1 # highest pixel number in the ring
kshift = ir % 2
nr = nside*4
else:
if (iz < nside): # north pole
ir = iz
ipix1 = 2*ir*(ir-1) # lowest pixel number in the ring
ipix2 = ipix1 + 4*ir - 1 # highest pixel number in the ring
else: # south pole
ir = 4*nside - iz
ipix1 = npix - 2*ir*(ir+1) # lowest pixel number in the ring
ipix2 = ipix1 + 4*ir - 1 # highest pixel number in the ring
nr = int(ir*4)
kshift = 1
twopi = 2.*np.pi
shift = kshift * .5
if conservative:
# conservative : include every intersected pixels,
# even if pixel CENTER is not in the range [phi_low, phi_hi]
ip_low = round (nr * phi_low / twopi - shift)
ip_hi = round (nr * phi_hi / twopi - shift)
ip_low = ip_low % nr # in {0,nr-1}
ip_hi = ip_hi % nr # in {0,nr-1}
else:
# strict : include only pixels whose CENTER is in [phi_low, phi_hi]
ip_low = np.ceil (nr * phi_low / twopi - shift)
ip_hi = np.floor(nr * phi_hi / twopi - shift)
diff = (ip_low - ip_hi) % nr # in {-nr+1,nr-1}
if (diff < 0):
diff = diff + nr # in {0,nr-1}
if (ip_low >= nr):
ip_low = ip_low - nr
if (ip_hi < 0 ):
ip_hi = ip_hi + nr
#print ip_hi-ip_low,nr
if phi_low <= 0.0 and phi_hi >= 2.0*np.pi:
ip_low = 0
ip_hi = nr - 1
if (ip_low > ip_hi):
to_top = True
else:
to_top = False
ip_low = int( ip_low + ipix1 )
ip_hi = int( ip_hi + ipix1 )
ipix1 = int(ipix1)
if (to_top):
nir1 = int( ipix2 - ip_low + 1 )
nir2 = int( ip_hi - ipix1 + 1 )
nir = int( nir1 + nir2 )
if ((nir1 > 0) and (nir2 > 0)):
listir = np.concatenate( (np.arange(ipix1, nir2+ipix1), np.arange(ip_low, nir1+ip_low) ) )
else:
if nir1 == 0:
listir = np.arange(ipix1, nir2+ipix1)
if nir2 == 0:
listir = np.arange(ip_low, nir1+ip_low)
else:
nir = int(ip_hi - ip_low + 1 )
listir = np.arange(ip_low, nir+ip_low)
#below added by AJR to address region around ra = 360
if float(listir[-1]-listir[0])/(ipix2-ipix1) > .5:
listir1 = np.arange(ipix1, listir[0]+1)
listir2 = np.arange(listir[-1], ipix2+1)
# #print listir[-1],listir[0],ipix1,ipix2,len(listir1),len(listir2)
listir = np.concatenate( (listir1,listir2 ) )
#print len(listir)
return listir
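# --- Illustrative example (not in the original code) ---
# in_ring returns the RING-ordered pixel indices of ring iz whose centres
# (or, with conservative=True, whose footprints) intersect [phi_low, phi_hi].
def _example_in_ring():
    nside = 64
    iz = 2 * nside                                   # equatorial ring
    pixels = in_ring(nside, iz, 0.0, 0.1, conservative=False)
    print(len(pixels), pixels.min(), pixels.max())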
# ---------------------------------------------------------------------------------------- #
# Linear interpolation
def lininterp(xval, xA, yA, xB, yB):
slope = (yB-yA) / (xB-xA)
yval = yA + slope * (xval - xA)
return yval
# ---------------------------------------------------------------------------------------- #
# Test if val belongs to the interval [b1, b2]
def inInter(val, b1, b2):
if b1 <= b2:
return np.logical_and( val <= b2, val >= b1 )
else:
return np.logical_and( val <= b1, val >= b2 )
# ---------------------------------------------------------------------------------------- #
# Test if a list of (theta,phi) values belongs to a region defined by its corners (theta,phi) for Left, Right, Bottom, Upper
def in_region(thetavals, phivals, thetaU, phiU, thetaR, phiR, thetaL, phiL, thetaB, phiB):
npts = len(thetavals)
phis = np.ndarray( (npts, 4) )
thetas = np.ndarray( (npts, 4) )
inds_phi = np.ndarray( (npts, 4), dtype=bool )
inds_phi[:,:] = False
inds_theta = np.ndarray( (npts, 4), dtype=bool )
inds_theta[:,:] = False
if thetaU != thetaB:
phis[:,0] = lininterp(thetavals, thetaB, phiB, thetaU, phiU)
inds_phi[:,0] = inInter(thetavals, thetaB, thetaU)
if thetaL != thetaU:
phis[:,1] = lininterp(thetavals, thetaU, phiU, thetaL, phiL)
inds_phi[:,1] = inInter(thetavals, thetaU, thetaL)
inds_phi[phis[:,0]==phis[:,1],1] = False
if thetaL != thetaR:
phis[:,2] = lininterp(thetavals, thetaL, phiL, thetaR, phiR)
inds_phi[:,2] = inInter(thetavals, thetaL, thetaR)
inds_phi[phis[:,0]==phis[:,2],2] = False
inds_phi[phis[:,1]==phis[:,2],2] = False
if thetaR != thetaB:
phis[:,3] = lininterp(thetavals, thetaR, phiR, thetaB, phiB)
inds_phi[:,3] = inInter(thetavals, thetaR, thetaB)
inds_phi[phis[:,0]==phis[:,3],3] = False
inds_phi[phis[:,1]==phis[:,3],3] = False
inds_phi[phis[:,2]==phis[:,3],3] = False
if phiU != phiB:
thetas[:,0] = lininterp(phivals, phiB, thetaB, phiU, thetaU)
inds_theta[:,0] = inInter(phivals, phiB, phiU)
if phiL != phiU:
thetas[:,1] = lininterp(phivals, phiU, thetaU, phiL, thetaL)
inds_theta[:,1] = inInter(phivals, phiU, phiL)
inds_theta[thetas[:,0]==thetas[:,1],1] = False
if phiL != phiR:
thetas[:,2] = lininterp(phivals, phiL, thetaL, phiR, thetaR)
inds_theta[:,2] = inInter(phivals, phiL, phiR)
inds_theta[thetas[:,0]==thetas[:,2],2] = False
inds_theta[thetas[:,1]==thetas[:,2],2] = False
if phiR != phiB:
thetas[:,3] = lininterp(phivals, phiR, thetaR, phiB, thetaB)
inds_theta[:,3] = inInter(phivals, phiR, phiB)
inds_theta[thetas[:,0]==thetas[:,3],3] = False
inds_theta[thetas[:,1]==thetas[:,3],3] = False
inds_theta[thetas[:,2]==thetas[:,3],3] = False
ind = np.where(np.logical_and(inds_phi[:,:].sum(axis=1)>1, inds_theta[:,:].sum(axis=1)>1))[0]
res = np.ndarray( (npts, ), dtype=bool )
res[:] = False
for i in ind:
phival = phivals[i]
thetaval = thetavals[i]
phis_loc = phis[i,inds_phi[i,:]]
thetas_loc = thetas[i,inds_theta[i,:]]
res[i] = (phival >= phis_loc[0]) & (phival <= phis_loc[1]) & (thetaval >= thetas_loc[0]) & (thetaval <= thetas_loc[1])
return res
# ---------------------------------------------------------------------------------------- #
# Computes healpix pixels of propertyArray.
# pixoffset is the number of pixels to truncate on the edges of each ccd image.
# ratiores is the super-resolution factor, i.e. the edges of each ccd image are processed
# at resolution ratiores*nside and then averaged at resolution nside.
#def computeHPXpix_sequ_new(nside, propertyArray, pixoffset=0, ratiores=4, coadd_cut=True):
def computeHPXpix_sequ_new(nside, propertyArray, pixoffset=0, ratiores=4, coadd_cut=False):
#return 'ERROR'
#img_ras, img_decs = [propertyArray[v] for v in ['ra0', 'ra1', 'ra2','ra3']],[propertyArray[v] for v in ['dec0', 'dec1', 'dec2','dec3']]
#x = [1+pixoffset, propertyArray['NAXIS1']-pixoffset, propertyArray['NAXIS1']-pixoffset, 1+pixoffset, 1+pixoffset]
#y = [1+pixoffset, 1+pixoffset, propertyArray['NAXIS2']-pixoffset, propertyArray['NAXIS2']-pixoffset, 1+pixoffset]
#if np.any(img_ras > 360.0):
# img_ras[img_ras > 360.0] -= 360.0
#if np.any(img_ras < 0.0):
# img_ras[img_ras < 0.0] += 360.0
#print 'in here'
#print len(img_ras)#,len(img_ras[0])
#plt.plot(img_ras[0],img_decs[0],'k,')
#plt.show()
img_ras, img_decs = computeCorners_WCS_TPV(propertyArray, pixoffset)
#DEBUGGING - MARCM
#print "debugging img_ras img_decs", img_ras
#for i in range(0,len(img_ras)):
# if img_ras[i] > 360.:
# img_ras[i] -= 360.
# if img_ras[i] < 0.:
# img_ras[i] += 360.
#END DEBUGGING MARCM BIT
# Coordinates of coadd corners
# RALL, t.DECLL, t.RAUL, t.DECUL, t.RAUR, t.DECUR, t.RALR, t.DECLR, t.URALL, t.UDECLL, t.URAUR, t.UDECUR
if coadd_cut:
#coadd_ras = [propertyArray[v] for v in ['URAUL', 'URALL', 'URALR', 'URAUR']]
#coadd_decs = [propertyArray[v] for v in ['UDECUL', 'UDECLL', 'UDECLR', 'UDECUR']]
coadd_ras = [propertyArray[v] for v in ['ra0', 'ra1', 'ra2', 'ra3']]
coadd_decs = [propertyArray[v] for v in ['dec0', 'dec1', 'dec2', 'dec3']]
coadd_phis = np.multiply(coadd_ras, np.pi/180)
coadd_thetas = np.pi/2 - np.multiply(coadd_decs, np.pi/180)
else:
coadd_phis = 0.0
coadd_thetas = 0.0
# Coordinates of image corners
#print img_ras
img_phis = np.multiply(img_ras , np.pi/180)
img_thetas = np.pi/2 - np.multiply(img_decs , np.pi/180)
img_pix = hp.ang2pix(nside, img_thetas, img_phis, nest=False)
pix_thetas, pix_phis = hp.pix2ang(nside, img_pix, nest=False)
# DEBUGGING - MARCM
#print 'pix_thetas', pix_thetas
#print 'pix_phis', pix_phis
#sys.exit()
#img_phis = np.mod( img_phis + np.pi, 2*np.pi ) # Enable these two lines to rotate everything by 180 degrees
#coadd_phis = np.mod( coadd_phis + np.pi, 2*np.pi ) # Enable these two lines to rotate everything by 180 degrees
# MARCM patch to correct a bug from Boris which didn't get BASS and MzLS ccd corners properly oriented.
# This patch is not necessarily comprehensive; the corner pairing may not cover all cases.
# In addition it also needs checking what happens around phi=0
dph01=abs(img_phis[0]-img_phis[1])
dph12=abs(img_phis[1]-img_phis[2])
if (dph01 < dph12) :
if (img_phis[1] < img_phis[2]):
if(img_thetas[0] < img_thetas[1]):
# this was original bit
#print "This is DECaLS"
ind_U = 0
ind_L = 2
ind_R = 3
ind_B = 1
else:
# This is for MzLS (seems to rotate other way)
#print "This is MzLS"
ind_U = 1
ind_L = 3
ind_R = 2
ind_B = 0
# print "Probably wrong indexing of ccd corner AAA"
else:
# This is added for BASS
#print "This is for BASS"
if(img_thetas[0] > img_thetas[1]):
ind_U = 2
ind_L = 0
ind_R = 1
ind_B = 3
else:
# A few O(100) ccds of DECaLS z-band fall here; not clear what to do with them
#ind_U = 3
#ind_L = 1
#ind_R = 0
#ind_B = 2
ind_U = 0
ind_L = 2
ind_R = 3
ind_B = 1
else:
print("WARNING: (MARCM:) Current ccd image may have wrong corner assignments in quicksip")
#raise ValueError("(MARCM:) probably wrong assignment of corner values in quicksip")
#ind_U = 0
#ind_L = 2
#ind_R = 3
#ind_B = 1
ind_U = 3
ind_L = 1
ind_R = 0
ind_B = 2
ipix_list = np.zeros(0, dtype=int)
weight_list = np.zeros(0, dtype=float)
# loop over rings until reached bottom
iring_U = ring_num(nside, np.cos(img_thetas.min()), shift=0)
iring_B = ring_num(nside, np.cos(img_thetas.max()), shift=0)
ipixs_ring = []
pmax = np.max(img_phis)
pmin = np.min(img_phis)
if (pmax - pmin > np.pi):
ipixs_ring = np.int64(np.concatenate([in_ring(nside, iring, pmax, pmin, conservative=True) for iring in range(iring_U-1, iring_B+1)]))
else:
ipixs_ring = np.int64(np.concatenate([in_ring(nside, iring, pmin, pmax, conservative=True) for iring in range(iring_U-1, iring_B+1)]))
ipixs_nest = hp.ring2nest(nside, ipixs_ring)
npixtot = hp.nside2npix(nside)
if ratiores > 1:
subipixs_nest = np.concatenate([np.arange(ipix*ratiores**2, ipix*ratiores**2+ratiores**2, dtype=np.int64) for ipix in ipixs_nest])
nsubpixperpix = ratiores**2
else:
subipixs_nest = ipixs_nest
nsubpixperpix = 1
rangepix_thetas, rangepix_phis = hp.pix2ang(nside*ratiores, subipixs_nest, nest=True)
#subipixs_ring = hp.ang2pix(nside*ratiores, rangepix_thetas, rangepix_phis, nest=False).reshape(-1, nsubpixperpix)
if (pmax - pmin > np.pi) or (np.max(coadd_phis) - np.min(coadd_phis) > np.pi):
#DEBUGGING - MARCM
#print "Eps debugging"
img_phis= np.mod( img_phis + np.pi, 2*np.pi )
coadd_phis= np.mod( coadd_phis + np.pi, 2*np.pi )
rangepix_phis = np.mod( rangepix_phis + np.pi, 2*np.pi )
subweights = in_region(rangepix_thetas, rangepix_phis,
img_thetas[ind_U], img_phis[ind_U], img_thetas[ind_L], img_phis[ind_L],
img_thetas[ind_R], img_phis[ind_R], img_thetas[ind_B], img_phis[ind_B])
# DEBUGGING - MARCM
#print 'pmax pmin', pmax, pmin
#print 'img_thetas again', img_thetas
#print 'img_phis again', img_phis
#print 'rangepix_phis', rangepix_phis
#print 'rangepix_theta', rangepix_thetas
#print 'subweights', subweights
if coadd_cut:
subweights_coadd = in_region(rangepix_thetas, rangepix_phis,
coadd_thetas[ind_U], coadd_phis[ind_U], coadd_thetas[ind_L], coadd_phis[ind_L],
coadd_thetas[ind_R], coadd_phis[ind_R], coadd_thetas[ind_B], coadd_phis[ind_B])
resubweights = np.logical_and(subweights, subweights_coadd).reshape(-1, nsubpixperpix)
else:
resubweights = subweights.reshape(-1, nsubpixperpix)
sweights = resubweights.sum(axis=1) / float(nsubpixperpix)
ind = (sweights > 0.0)
# DEBUGGING - MARCM
#print 'ind', ind
#print 'ipixs_ring', ipixs_ring
return ipixs_ring[ind], sweights[ind], img_thetas, img_phis, resubweights[ind,:]
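# --- Illustrative example (not in the original code) ---
# Self-contained sketch of computeHPXpix_sequ_new on a fake ccd "header".
# The WCS keywords below (crval/crpix/cd and the width/height in pixels) are
# invented values for a ~0.26 arcsec/pixel ccd; real catalogues supply them
# per image, and coadd_cut=False (the default) so no coadd corner columns are needed.
def _example_computeHPXpix():
    fake_ccd = {'width': 2048, 'height': 4096,
                'crpix1': 1024.0, 'crpix2': 2048.0,
                'crval1': 30.0, 'crval2': 10.0,
                'cd1_1': 0.0, 'cd1_2': 7.3e-05,
                'cd2_1': 7.3e-05, 'cd2_2': 0.0}
    pix, weights, thetas, phis, subw = computeHPXpix_sequ_new(
        1024, fake_ccd, pixoffset=0, ratiores=4)
    print(len(pix), weights.min(), weights.max())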
def computeHPXpix_sequ_new_simp(nside, propertyArray):
#return 'ERROR'
#Hack by AJR and MarcM, just return all of the pixel centers within the ra,dec range
img_ras, img_decs = [propertyArray[v] for v in ['ra0', 'ra1', 'ra2','ra3']],[propertyArray[v] for v in ['dec0', 'dec1', 'dec2','dec3']]
#print min(img_ras),max(img_ras)
#more efficient version below failed for some reason
#iweird = 0
for i in range(0,len(img_ras)):
if img_ras[i] > 360.:
img_ras[i] -= 360.
if img_ras[i] < 0.:
img_ras[i] += 360.
#if max(img_ras) - min(img_ras) > 1.:
# print img_ras,img_decs
#if np.any(img_ras > 360.0):
# img_ras[img_ras > 360.0] -= 360.0
#if np.any(img_ras < 0.0):
# img_ras[img_ras < 0.0] += 360.0
# Coordinates of image corners
#print img_ras
img_phis = np.multiply(img_ras , np.pi/180.)
img_thetas = np.pi/2. - np.multiply(img_decs , np.pi/180.)
img_pix = hp.ang2pix(nside, img_thetas, img_phis, nest=False)
pix_thetas, pix_phis = hp.pix2ang(nside, img_pix, nest=False)
ipix_list = np.zeros(0, dtype=int)
# loop over rings until reached bottom
iring_U = ring_num(nside, np.cos(img_thetas.min()), shift=0)
iring_B = ring_num(nside, np.cos(img_thetas.max()), shift=0)
ipixs_ring = []
pmax = np.max(img_phis)
pmin = np.min(img_phis)
if pmax-pmin == 0:
return []
p1 = pmin
p2 = pmax
if pmin < .1 and pmax > 1.9*np.pi:
#straddling line
#img_phis.sort()
for i in range(0,len(img_phis)):
if img_phis[i] > p1 and img_phis[i] < np.pi:
p1 = img_phis[i]
if img_phis[i] < p2 and img_phis[i] > np.pi:
p2 = img_phis[i]
#print 'kaka', img_phis, img_ras
#print 'kaka', p1, p2, iring_U, iring_B
ipixs_ring1 = np.int64(np.concatenate([in_ring(nside, iring, 0, p1, conservative=False) for iring in range(iring_U, iring_B+1)]))
ipixs_ring2 = np.int64(np.concatenate([in_ring(nside, iring, p2, 2.*np.pi, conservative=False) for iring in range(iring_U, iring_B+1)]))
#ipixs_ring1 = np.int64(np.concatenate([in_ring_simp(nside, iring, 0, p1, conservative=False) for iring in range(iring_U, iring_B+1)]))
#ipixs_ring2 = np.int64(np.concatenate([in_ring_simp(nside, iring, p2, 2.*np.pi, conservative=False) for iring in range(iring_U, iring_B+1)]))
ipixs_ring = np.concatenate((ipixs_ring1,ipixs_ring2))
# print len(ipixs_ring),len(ipixs_ring1),len(ipixs_ring2),iring_B-iring_U,pmin,pmax,p1,p2
#
if len(ipixs_ring1) > 1000:
print( 'kaka1', p1, iring_U, iring_B)
if len(ipixs_ring2) > 1000:
print( 'kaka2', p2, iring_U, iring_B)
else:
ipixs_ring = np.int64(np.concatenate([in_ring(nside, iring, p1, p2, conservative=False) for iring in range(iring_U, iring_B+1)]))
#ipixs_ring = np.int64(np.concatenate([in_ring_simp(nside, iring, p1, p2, conservative=False) for iring in range(iring_U, iring_B+1)]))
if len(ipixs_ring) > 1000:
#print 'hey', img_ras,img_decs
print( 'careful', len(ipixs_ring),iring_B-iring_U,pmin,pmax,p1,p2)
#nwrong = nwrong +1
return [] #temporary fix
# print len(ipixs_ring),iring_B-iring_U,pmin,pmax,min(img_ras),max(img_ras)
#print len(ipixs_ring),iring_B-iring_U,pmin,pmax,min(img_ras),max(img_ras)
return ipixs_ring
# ---------------------------------------------------------------------------------------- #
# Crucial routine: reads the properties of a ccd image and returns its corners in ra dec.
# pixoffset is the number of pixels to truncate on the edges of each ccd image.
def computeCorners_WCS_TPV(propertyArray, pixoffset):
#x = [1+pixoffset, propertyArray['NAXIS1']-pixoffset, propertyArray['NAXIS1']-pixoffset, 1+pixoffset, 1+pixoffset]
#y = [1+pixoffset, 1+pixoffset, propertyArray['NAXIS2']-pixoffset, propertyArray['NAXIS2']-pixoffset, 1+pixoffset]
x = [1+pixoffset, propertyArray['width']-pixoffset, propertyArray['width']-pixoffset, 1+pixoffset, 1+pixoffset]
y = [1+pixoffset, 1+pixoffset, propertyArray['height']-pixoffset, propertyArray['height']-pixoffset, 1+pixoffset]
#ras, decs = xy2radec(x, y, propertyArray)
ras, decs = xy2radec_nopv(x, y, propertyArray)
return ras, decs
# ---------------------------------------------------------------------------------------- #
# Performs WCS inverse projection to obtain ra dec from ccd image information.
def xy2radec(x, y, propertyArray):
crpix = np.array( [ propertyArray['CRPIX1'], propertyArray['CRPIX2'] ] )
cd = np.array( [ [ propertyArray['CD1_1'], propertyArray['CD1_2'] ],
[ propertyArray['CD2_1'], propertyArray['CD2_2'] ] ] )
pv1 = [ float(propertyArray['PV1_'+str(k)]) for k in range(11) if k != 3 ] # if k != 3
pv2 = [ float(propertyArray['PV2_'+str(k)]) for k in range(11) if k != 3 ] # if k != 3
pv = np.array( [ [ [ pv1[0], pv1[2], pv1[5], pv1[9] ],
[ pv1[1], pv1[4], pv1[8], 0. ],
[ pv1[3], pv1[7], 0. , 0. ],
[ pv1[6], 0. , 0. , 0. ] ],
[ [ pv2[0], pv2[1], pv2[3], pv2[6] ],
[ pv2[2], pv2[4], pv2[7], 0. ],
[ pv2[5], pv2[8], 0. , 0. ],
[ pv2[9], 0. , 0. , 0. ] ] ] )
center_ra = propertyArray['CRVAL1'] * np.pi / 180.0
center_dec = propertyArray['CRVAL2'] * np.pi / 180.0
ras, decs = radec_gnom(x, y, center_ra, center_dec, cd, crpix, pv)
ras = np.multiply( ras, 180.0 / np.pi )
decs = np.multiply( decs, 180.0 / np.pi )
if np.any(ras > 360.0):
ras[ras > 360.0] -= 360.0
if np.any(ras < 0.0):
ras[ras < 0.0] += 360.0
return ras, decs
def xy2radec_nopv(x, y, propertyArray):
crpix = np.array( [ propertyArray['crpix1'], propertyArray['crpix2'] ] )
cd = np.array( [ [ propertyArray['cd1_1'], propertyArray['cd1_2'] ],
[ propertyArray['cd2_1'], propertyArray['cd2_2'] ] ] )
center_ra = propertyArray['crval1'] * np.pi / 180.0
center_dec = propertyArray['crval2'] * np.pi / 180.0
ras, decs = radec_gnom(x, y, center_ra, center_dec, cd, crpix, pv=False)
ras = np.multiply( ras, 180.0 / np.pi )
decs = np.multiply( decs, 180.0 / np.pi )
if np.any(ras > 360.0):
ras[ras > 360.0] -= 360.0
if np.any(ras < 0.0):
ras[ras < 0.0] += 360.0
return ras, decs
# ---------------------------------------------------------------------------------------- #
# Deproject into ra dec values
def deproject_gnom(u, v, center_ra, center_dec):
u *= arcsec_to_radians
v *= arcsec_to_radians
rsq = u*u + v*v
cosc = sinc_over_r = 1./np.sqrt(1.+rsq)
cosdec = np.cos(center_dec)
sindec0 = np.sin(center_dec)
# keep the field-centre sin(dec) separate: the inverse gnomonic RA denominator uses it
sindec = cosc * sindec0 + v * sinc_over_r * cosdec
tandra_num = -u * sinc_over_r
tandra_denom = cosc * cosdec - v * sinc_over_r * sindec0
dec = np.arcsin(sindec)
ra = center_ra + np.arctan2(tandra_num, tandra_denom)
return ra, dec
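# --- Illustrative check (not in the original code) ---
# At the tangent point (u, v) = (0, 0) the inverse gnomonic projection must
# return the field centre itself.
def _example_deproject_gnom_centre():
    ra0, dec0 = np.radians(30.0), np.radians(10.0)
    ra, dec = deproject_gnom(0.0, 0.0, ra0, dec0)
    assert abs(ra - ra0) < 1e-12 and abs(dec - dec0) < 1e-12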
# ---------------------------------------------------------------------------------------- #
def radec_gnom(x, y, center_ra, center_dec, cd, crpix, pv):
p1 = np.array( [ np.atleast_1d(x), np.atleast_1d(y) ] )
p2 = np.dot(cd, p1 - crpix[:,np.newaxis])
u = p2[0]
v = p2[1]
if pv:
usq = u*u
vsq = v*v
ones = np.ones(u.shape)
upow = np.array([ ones, u, usq, usq*u ])
vpow = np.array([ ones, v, vsq, vsq*v ])
temp = np.dot(pv, vpow)
p2 = np.sum(upow * temp, axis=1)
u = - p2[0] * degree_to_arcsec
v = p2[1] * degree_to_arcsec
else:
u = -u * degree_to_arcsec
v = v * degree_to_arcsec
ra, dec = deproject_gnom(u, v, center_ra, center_dec)
return ra, dec
# ---------------------------------------------------------------------------------------- #
# Class for a pixel of the map, containing trees of images and values
class NDpix_simp:
def __init__(self, propertyArray_in):
self.nbelem = 1
self.ratiores = 1
self.propertyArray = [propertyArray_in]
def addElem(self, propertyArray_in):
self.nbelem += 1
self.propertyArray.append(propertyArray_in)
# Project NDpix into a single number
# for a given property and operation applied to its array of images
def project(self, property, weights, operation):
asperpix = 0.263
A = np.pi*(1.0/asperpix)**2
pis = np.array([1.0 for proparr in self.propertyArray])
# No super-resolution or averaging
vals = np.array([proparr[property] for proparr in self.propertyArray])
if operation == 'mean':
return np.mean(vals)
if operation == 'median':
return np.median(vals)
if operation == 'total':
return np.sum(vals)
if operation == 'min':
return np.min(vals)
if operation == 'max':
return np.max(vals)
if operation == 'maxmin':
return np.max(vals) - np.min(vals)
if operation == 'fracdet':
return 1.0
if operation == 'num':
return len(vals)
# Class for a pixel of the map, containing trees of images and values
class NDpix:
def __init__(self, propertyArray_in, inweights, ratiores):
self.ratiores = ratiores
self.nbelem = 1
self.propertyArray = [propertyArray_in]
if self.ratiores > 1:
self.weights = np.array([inweights])
def addElem(self, propertyArray_in, inweights):
self.nbelem += 1
self.propertyArray.append(propertyArray_in)
if self.ratiores > 1:
self.weights = np.vstack( (self.weights, inweights) )
# Project NDpix into a single number
# for a given property and operation applied to its array of images
def project(self, property, weights, operation):
asperpix = 0.263
A = np.pi*(1.0/asperpix)**2
# Computes COADD weights
if weights == 'coaddweights3' or weights == 'coaddweights2' or weights == 'coaddweights' or property == 'maglimit2' or property == 'maglimit' or property == 'maglimit3' or property == 'sigmatot':
m_zpi = np.array([proparr['MAGZP'] for proparr in self.propertyArray])
if property == 'sigmatot':
m_zp = np.array([30.0 for proparr in self.propertyArray])
else:
m_zp = np.array([proparr['COADD_MAGZP'] for proparr in self.propertyArray])
if weights == 'coaddweights' or property == 'maglimit':
sigma_bgi = np.array([
1.0/np.sqrt((proparr['WEIGHTA']+proparr['WEIGHTB'])/2.0)
if (proparr['WEIGHTA']+proparr['WEIGHTB']) >= 0.0 else proparr['SKYSIGMA']
for proparr in self.propertyArray])
if weights == 'coaddweights2' or property == 'maglimit2':
sigma_bgi = np.array([
0.5/np.sqrt(proparr['WEIGHTA'])+0.5/np.sqrt(proparr['WEIGHTB'])
if (proparr['WEIGHTA']+proparr['WEIGHTB']) >= 0.0 else proparr['SKYSIGMA']
for proparr in self.propertyArray])
if weights == 'coaddweights3' or property == 'maglimit3' or property == 'sigmatot':
sigma_bgi = np.array([proparr['SKYSIGMA'] for proparr in self.propertyArray])
sigpis = 100**((m_zpi-m_zp)/5.0)
mspis = (sigpis/sigma_bgi)**2.0
pis = (sigpis/sigma_bgi)**2.0
elif weights == 'invsqrtexptime':
pis = np.array([ 1.0 / np.sqrt(proparr['EXPTIME']) for proparr in self.propertyArray])
else:
pis = np.array([1.0 for proparr in self.propertyArray])
pis = np.divide(pis, pis.mean())
# No super-resolution or averaging
if self.ratiores == 1:
if property == 'count':
vals = np.array([1.0 for proparr in self.propertyArray])
elif property == 'sigmatot':
return np.sqrt(1.0 / mspis.sum())
elif property == 'maglimit3' or property == 'maglimit2' or property == 'maglimit':
sigma2_tot = 1.0 / mspis.sum()
return np.mean(m_zp) - 2.5*np.log10(10*np.sqrt(A*sigma2_tot) )
else:
vals = np.array([proparr[property] for proparr in self.propertyArray])
vals = vals * pis
if operation == 'mean':
return np.mean(vals)
if operation == 'median':
return np.median(vals)
if operation == 'total':
return np.sum(vals)
if operation == 'min':
return np.min(vals)
if operation == 'max':
return np.max(vals)
if operation == 'maxmin':
return np.max(vals) - np.min(vals)
if operation == 'fracdet':
return 1.0
if operation == 'num':
return len(vals)
# Retrieve property array and apply operation (with super-resolution)
if property == 'count':
vals = np.array([1.0 for proparr in self.propertyArray])
elif property == 'maglimit2' or property == 'maglimit' or property == 'maglimit3' or property == 'sigmatot':
vals = (sigpis/sigma_bgi)**2
else:
#print property
vals = np.array([proparr[property] for proparr in self.propertyArray])
vals = vals * pis
theweights = self.weights
weightedarray = (theweights.T * vals).T
counts = (theweights.T * pis).sum(axis=1)
ind = counts > 0
if property == 'maglimit' or property == 'maglimit2' or property == 'maglimit3':
sigma2_tot = 1.0 / weightedarray.sum(axis=0)
maglims = np.mean(m_zp) - 2.5*np.log10(10*np.sqrt(A*sigma2_tot) )
return maglims[ind].mean()
if property == 'sigmatot':
sigma2_tot = 1.0 / weightedarray.sum(axis=0)
return np.sqrt(sigma2_tot)[ind].mean()
if operation == 'min':
return np.min(vals)
if operation == 'max':
return np.max(vals)
if operation == 'maxmin':
return np.max(vals) - np.min(vals)
if operation == 'mean':
return (weightedarray.sum(axis=0) / counts)[ind].mean()
if operation == 'median':
return np.ma.median(np.ma.array(weightedarray, mask=np.logical_not(theweights)), axis=0)[ind].mean()
if operation == 'total':
return weightedarray.sum(axis=0)[ind].mean()
if operation == 'fracdet':
temp = weightedarray.sum(axis=0)
return temp[ind].size / float(temp.size)
if operation == 'num':
return len(vals)
# ---------------------------------------------------------------------------------------- #
# Project NDpix into a value
def projectNDpix(args):
pix, property, weights, operation = args
if pix != 0:
return pix.project(property, weights, operation)
else:
return hp.UNSEEN
# Create a "healtree", i.e. a set of pixels with trees of images in them.
def makeHealTree(args):
samplename, nside, ratiores, pixoffset, tbdata = args
treemap = HealTree(nside)
verbcount = 1000
count = 0
start = time.time()
duration = 0
if(verbose): print( '>', samplename, ': starting tree making')
for i, propertyArray in enumerate(tbdata):
count += 1
start_one = time.time()
# DEBUGGING - MARCM
#print "debugging i ", i
treemap.addElem(propertyArray, ratiores, pixoffset)
end_one = time.time()
duration += float(end_one - start_one)
if count == verbcount:
if(verbose): print( '>', samplename, ': processed images', i-verbcount+1, '-', i+1, '(on '+str(len(tbdata))+') in %.2f' % duration, 'sec (~ %.3f' % (duration/float(verbcount)), 'per image)')
count = 0
duration = 0
end = time.time()
if(verbose): print('>', samplename, ': tree making took : %.2f' % float(end - start), 'sec for', len(tbdata), 'images')
return treemap
def makeHealTree_simp(args):
#hack by AJR
samplename, nside, tbdata = args
treemap = HealTree(nside)
verbcount = 1000
count = 0
start = time.time()
duration = 0
if(verbose): print( '>', samplename, ': starting tree making')
for i, propertyArray in enumerate(tbdata):
count += 1
start_one = time.time()
treemap.addElem_simp(propertyArray)
end_one = time.time()
duration += float(end_one - start_one)
if count == verbcount:
if(verbose): print( '>', samplename, ': processed images', i-verbcount+1, '-', i+1, '(on '+str(len(tbdata))+') in %.2f' % duration, 'sec (~ %.3f' % (duration/float(verbcount)), 'per image)')
count = 0
duration = 0
end = time.time()
if(verbose): print( '>', samplename, ': tree making took : %.2f' % float(end - start), 'sec for', len(tbdata), 'images')
return treemap
# ---------------------------------------------------------------------------------------- #
# Class for multi-dimensional healpix map that can be
# created and processed in parallel.
class HealTree:
# Initialise and create array of pixels
def __init__(self, nside):
self.nside = nside
self.npix = 12*nside**2
self.pixlist = np.zeros(self.npix, dtype=object)
# Process image and absorb its properties
def addElem(self, propertyArray, ratiores, pixoffset):
# Retrieve pixel indices
ipixels, weights, thetas_c, phis_c, subpixrings = computeHPXpix_sequ_new(self.nside, propertyArray, pixoffset=pixoffset, ratiores=ratiores)
# DEBUGGING - MARCM
#print "deguging ipix addElem", ipixels
# For each pixel, absorb image properties
for ii, (ipix, weight) in enumerate(zip(ipixels, weights)):
if self.pixlist[ipix] == 0:
self.pixlist[ipix] = NDpix(propertyArray, subpixrings[ii,:], ratiores)
else:
self.pixlist[ipix].addElem(propertyArray, subpixrings[ii,:])
def addElem_simp(self, propertyArray):
#AJR hack
# Retrieve non-conservative pixel indices, no oversampling, just the pixels with centers in the CCD
ipixels = computeHPXpix_sequ_new_simp(self.nside, propertyArray)
# For each pixel, absorb image properties
#if ipixels == -1:
# return True
#if len(i
for ipix in ipixels:
if self.pixlist[ipix] == 0:
self.pixlist[ipix] = NDpix_simp(propertyArray)
else:
self.pixlist[ipix].addElem(propertyArray)
# Project HealTree into partial Healpix map
# for a given property and operation applied to its array of images
def project_partial(self, property, weights, operation, pool=None):
ind = np.where(self.pixlist != 0)
pixel = np.arange(self.npix)[ind]
verbcount = pixel.size // 10
count = 0
start = time.time()
duration = 0
signal = np.zeros(pixel.size)
for i, pix in enumerate(self.pixlist[ind]):
count += 1
start_one = time.time()
signal[i] = pix.project(property, weights, operation)
end_one = time.time()
duration += float(end_one - start_one)
if count == verbcount:
if(verbose): print( '>', property, weights, operation, ': processed pixels', i-verbcount+1, '-', i+1, '(on '+str(pixel.size)+') in %.1e' % duration, 'sec (~ %.1e' % (duration/float(verbcount)), 'per pixel)')
count = 0
duration = 0
end = time.time()
print( '> Projection', property, weights, operation, ' took : %.2f' % float(end - start), 'sec for', pixel.size, 'pixels')
#signal = [pix.project(property, weights, operation) for pix in self.pixlist[ind]]
return pixel, signal
# Project HealTree into regular Healpix map
# for a given property and operation applied to its array of images
def project(self, property, weights, operation, pool=None):
outmap = np.zeros(self.npix)
outmap.fill(hp.UNSEEN)
if pool is None:
for ipix, pix in enumerate(self.pixlist):
if pix != 0:
outmap[ipix] = pix.project(property, weights, operation)
else:
outmap = np.array( pool.map( projectNDpix, [ (pix, property, weights, operation) for pix in self.pixlist ] ) )
return outmap
# ---------------------------------------------------------------------------------------- #
def makeHpxMap(args):
healtree, property, weights, operation = args
return healtree.project(property, weights, operation)
# ---------------------------------------------------------------------------------------- #
def makeHpxMap_partial(args):
healtree, property, weights, operation = args
return healtree.project_partial(property, weights, operation)
# ---------------------------------------------------------------------------------------- #
def addElemHealTree(args):
healTree, propertyArray, ratiores = args
healTree.addElem(propertyArray, ratiores)
# ---------------------------------------------------------------------------------------- #
# Process image and absorb its properties
def addElem(args):
iarr, tbdatadtype, propertyArray, nside, propertiesToKeep, ratiores = args
propertyArray.dtype = tbdatadtype
if(verbose): print( 'Processing image', iarr, propertyArray['RA'])
# Retrieve pixel indices
ipixels, weights, thetas_c, phis_c = computeHPXpix_sequ_new(nside, propertyArray, pixoffset=pixoffset, ratiores=ratiores)
print( 'Processing image', iarr, thetas_c, phis_c)
# For each pixel, absorb image properties
for ipix, weight in zip(ipixels, weights):
if globalTree[ipix] == 0:
globalTree[ipix] = NDpix(propertyArray, propertiesToKeep, weight=weight)
else:
globalTree[ipix].addElem(propertyArray, propertiesToKeep, weight=weight)
# ---------------------------------------------------------------------------------------- #
# Read and project a Healtree into Healpix maps, and write them.
def project_and_write_maps(mode, propertiesweightsoperations, tbdata, catalogue_name, outrootdir, sample_names, inds, nside, ratiores, pixoffset, nsidesout=None):
resol_prefix = 'nside'+str(nside)+'_oversamp'+str(ratiores)
outroot = outrootdir + '/' + catalogue_name + '/' + resol_prefix + '/'
mkdir_p(outroot)
if mode == 1: # Fully sequential
for sample_name, ind in zip(sample_names, inds):
#print len(tbdata[ind]['ra1'])
#plt.plot(tbdata[ind]['ra1'],tbdata[ind]['dec1'],'k,')
#plt.show()
treemap = makeHealTree( (catalogue_name+'_'+sample_name, nside, ratiores, pixoffset, np.array(tbdata[ind])) )
for property, weights, operation in propertiesweightsoperations:
cutmap_indices, cutmap_signal = makeHpxMap_partial( (treemap, property, weights, operation) )
if nsidesout is None:
fname = outroot + '_'.join([catalogue_name, sample_name, resol_prefix, property, weights, operation]) + '.fits'
print( 'Creating and writing', fname)
write_partial_map(fname, cutmap_indices, cutmap_signal, nside, nest=False)
else:
cutmap_indices_nest = hp.ring2nest(nside, cutmap_indices)
outmap_hi = np.zeros(hp.nside2npix(nside))
outmap_hi.fill(0.0) #outmap_hi.fill(hp.UNSEEN)
outmap_hi[cutmap_indices_nest] = cutmap_signal
for nside_out in nsidesout:
if nside_out == nside:
outmap_lo = outmap_hi
else:
outmap_lo = hp.ud_grade(outmap_hi, nside_out, order_in='NESTED', order_out='NESTED')
resol_prefix2 = 'nside'+str(nside_out)+'from'+str(nside)+'o'+str(ratiores)
outroot2 = outrootdir + '/' + catalogue_name + '/' + resol_prefix2 + '/'
mkdir_p(outroot2)
fname = outroot2 + '_'.join([catalogue_name, sample_name, resol_prefix2, property, weights, operation]) + '.fits'
print( 'Writing', fname)
hp.write_map(fname, outmap_lo, nest=True)
subprocess.call("gzip -f "+fname,shell=True)
if mode == 3: # Fully parallel
pool = Pool(len(inds))
print( 'Creating HealTrees')
treemaps = pool.map( makeHealTree,
[ (catalogue_name+'_'+samplename, nside, ratiores, pixoffset, np.array(tbdata[ind]))
for samplename, ind in zip(sample_names, inds) ] )
for property, weights, operation in propertiesweightsoperations:
print( 'Making maps for', property, weights, operation)
outmaps = pool.map( makeHpxMap_partial,
[ (treemap, property, weights, operation) for treemap in treemaps ] )
for sample_name, outmap in zip(sample_names, outmaps):
fname = outroot + '_'.join([catalogue_name, sample_name, resol_prefix, property, weights, operation]) + '.fits'
print( 'Writing', fname)
cutmap_indices, cutmap_signal = outmap
write_partial_map(fname, cutmap_indices, cutmap_signal, nside, nest=False)
if mode == 2: # Parallel tree making and sequential writing
pool = Pool(len(inds))
print( 'Creating HealTrees')
treemaps = pool.map( makeHealTree,
[ (catalogue_name+'_'+samplename, nside, ratiores, pixoffset, np.array(tbdata[ind]))
for samplename, ind in zip(sample_names, inds) ] )
for property, weights, operation in propertiesweightsoperations:
for sample_name, treemap in zip(sample_names, treemaps):
fname = outroot + '_'.join([catalogue_name, sample_name, resol_prefix, property, weights, operation]) + '.fits'
print('Writing', fname)
#outmap = makeHpxMap( (treemap, property, weights, operation) )
#hp.write_map(fname, outmap, nest=False)
cutmap_indices, cutmap_signal = makeHpxMap_partial( (treemap, property, weights, operation) )
write_partial_map(fname, cutmap_indices, cutmap_signal, nside, nest=False)
def project_and_write_maps_simp(mode, propertiesweightsoperations, tbdata, catalogue_name, outrootdir, sample_names, inds, nside):
#hack by AJR and MarcM
#nwrong = 0 #number of wrong projected pixels
resol_prefix = 'nside'+str(nside)+'_oversamp1'
outroot = outrootdir + '/' + catalogue_name + '/' + resol_prefix + '/'
mkdir_p(outroot)
for sample_name, ind in zip(sample_names, inds):
treemap = makeHealTree_simp( (catalogue_name+'_'+sample_name, nside, np.array(tbdata[ind])) )
for property, weights, operation in propertiesweightsoperations:
cutmap_indices, cutmap_signal = makeHpxMap_partial( (treemap, property, weights, operation) )
fname = outroot + '_'.join([catalogue_name, sample_name, resol_prefix, property, weights, operation]) + '.fits'
print('Creating and writing', fname)
write_partial_map(fname, cutmap_indices, cutmap_signal, nside, nest=False)
#print "number of wrong projected ccd-pointings is: ", nwrong
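# --- Illustrative usage (not in the original code) ---
# Sketch of how the two drivers above are typically called. The file name,
# column names and property/weight/operation triples are assumptions; they
# must match columns present in the input ccd table.
#
# tbdata = pyfits.open('ccds-annotated.fits')[1].data
# props = [('exptime', '', 'total'), ('airmass', '', 'mean')]
# inds = [np.where(tbdata['filter'] == 'r')[0]]
# project_and_write_maps(1, props, tbdata, 'mycat', './maps', ['r_band'],
#                        inds, nside=1024, ratiores=4, pixoffset=0)
# project_and_write_maps_simp(1, props, tbdata, 'mycat', './maps', ['r_band'],
#                             inds, nside=256)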
# ---------------------------------------------------------------------------------------- #
def test():
fname = '/Users/bl/Dropbox/Projects/Quicksip/data/SVA1_COADD_ASTROM_PSF_INFO.fits'
#fname = '/Users/bl/Dropbox/Projects/Quicksip/data/Y1A1_IMAGEINFO_and_COADDINFO.fits'
pixoffset = 10
hdulist = pyfits.open(fname)
tbdata = hdulist[1].data
hdulist.close()
nside = 1024
ratiores = 4
treemap = HealTree(nside)
#results = pool.map(treemap.addElem, [imagedata for imagedata in tbdata])
print( tbdata.dtype)
#ind = np.ndarray([0])
ind = np.where( tbdata['band'] == 'i' )
import numpy.random
ind = numpy.random.choice(ind[0], 1 )
print( 'Number of images :', len(ind))
hpxmap = np.zeros(hp.nside2npix(nside))
ras_c = []
decs_c = []
for i, propertyArray in enumerate(tbdata[ind]):
ras_c.append(propertyArray['RA'])
decs_c.append(propertyArray['DEC'])
plt.figure()
for i, propertyArray in enumerate(tbdata[ind]):
print(i)
propertyArray.dtype = tbdata.dtype
listpix, weights, thetas_c, phis_c, listpix_sup = computeHPXpix_sequ_new(nside, propertyArray, pixoffset=pixoffset, ratiores=ratiores)
#listpix2, weights2, thetas_c2, phis_c2 = computeHPXpix_sequ(nside, propertyArray, pixoffset=pixoffset, ratiores=ratiores)
hpxmap = np.zeros(hp.nside2npix(nside))
hpxmap[listpix] = weights
hpxmap_sup = np.zeros(hp.nside2npix(ratiores*nside))
hpxmap_sup[listpix_sup] = 1.0
listpix_hi, weights_hi, thetas_c_hi, phis_c_hi, superind_hi = computeHPXpix_sequ_new(ratiores*nside, propertyArray, pixoffset=pixoffset, ratiores=1)
hpxmap_hi = np.zeros(hp.nside2npix(ratiores*nside))
hpxmap_hi[listpix_hi] = weights_hi
hpxmap_hitolo = hp.ud_grade(hpxmap_hi, nside)
print('valid hpxmap_hi', np.where(hpxmap_hi > 0)[0])
print('hpxmap', zip(np.where(hpxmap > 0)[0], hpxmap[hpxmap > 0]))
print('hpxmap_sup', zip(np.where(hpxmap_sup > 0)[0], hpxmap_sup[hpxmap_sup > 0]))
print('hpxmap_hitolo', zip(np.where(hpxmap_hitolo > 0)[0], hpxmap_hitolo[hpxmap_hitolo > 0]))
hp.gnomview(hpxmap_hi, title='hpxmap_hi', rot=[propertyArray['RA'], propertyArray['DEC']], reso=0.2)
hp.gnomview(hpxmap_sup, title='hpxmap_sup', rot=[propertyArray['RA'], propertyArray['DEC']], reso=0.2)
hp.gnomview(hpxmap_hitolo, title='hpxmap_hitolo', rot=[propertyArray['RA'], propertyArray['DEC']], reso=0.2)
hp.gnomview(hpxmap, title='hpxmap', rot=[propertyArray['RA'], propertyArray['DEC']], reso=0.2)
#plt.plot(phis_c, thetas_c)
thetas, phis = hp.pix2ang(nside, listpix)
#plt.scatter(phis, thetas, color='red', marker='o', s=50*weights)
#plt.scatter(propertyArray['RA']*np.pi/180, np.pi/2 - propertyArray['DEC']*np.pi/180)
#plt.text(propertyArray['RA']*np.pi/180, np.pi/2 - propertyArray['DEC']*np.pi/180, str(i))
plt.show()
stop
#if __name__ == "__main__":
# test()
|
gpl-2.0
| -6,887,529,562,619,616,000
| 42.075105
| 223
| 0.562613
| false
| 3.094701
| false
| false
| false
|
chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend
|
tests/unit/dataactvalidator/test_b7_object_class_program_activity_2.py
|
1
|
1566
|
from tests.unit.dataactcore.factories.staging import ObjectClassProgramActivityFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'b7_object_class_program_activity_2'
def test_column_headers(database):
expected_subset = {'row_number', 'gross_outlays_delivered_or_cpe', 'ussgl490200_delivered_orde_cpe',
'ussgl490800_authority_outl_cpe', 'ussgl498200_upward_adjustm_cpe'}
actual = set(query_columns(_FILE, database))
assert (actual & expected_subset) == expected_subset
def test_success(database):
""" Test Object Class Program Activity gross_outlays_delivered_or_cpe equals ussgl490200_delivered_orde_cpe +
ussgl490800_authority_outl_cpe + ussgl498200_upward_adjustm_cpe """
op = ObjectClassProgramActivityFactory(gross_outlays_delivered_or_cpe=3, ussgl490200_delivered_orde_cpe=1,
ussgl490800_authority_outl_cpe=1, ussgl498200_upward_adjustm_cpe=1)
assert number_of_errors(_FILE, database, models=[op]) == 0
def test_failure(database):
""" Test Object Class Program Activity gross_outlays_delivered_or_cpe doesn't equal ussgl490200_delivered_orde_cpe +
ussgl490800_authority_outl_cpe + ussgl498200_upward_adjustm_cpe """
op = ObjectClassProgramActivityFactory(gross_outlays_delivered_or_cpe=1, ussgl490200_delivered_orde_cpe=1,
ussgl490800_authority_outl_cpe=1, ussgl498200_upward_adjustm_cpe=1)
assert number_of_errors(_FILE, database, models=[op]) == 1
|
cc0-1.0
| -5,720,147,414,092,024,000
| 46.454545
| 121
| 0.714559
| false
| 3.189409
| true
| false
| false
|
openstack/ironic
|
tools/benchmark/do_not_run_create_benchmark_data.py
|
1
|
4622
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import time
from oslo_db.sqlalchemy import enginefacade
from sqlalchemy import sql
from ironic.common import service
from ironic.conf import CONF # noqa To Load Configuration
from ironic.objects import node
def _create_test_nodes():
print("Starting creation of fake nodes.")
start = time.time()
node_count = 10000
checkin = time.time()
for i in range(0, node_count):
new_node = node.Node({
'power_state': 'power off',
'driver': 'ipmi',
'driver_internal_info': {'test-meow': i},
'name': 'BenchmarkTestNode-%s' % i,
'driver_info': {
'ipmi_username': 'admin',
'ipmi_password': 'admin',
'ipmi_address': 'testhost%s.env.top.level.domain' % i},
'resource_class': 'CUSTOM_BAREMETAL',
'properties': {
'cpu': 4,
'memory': 32,
'cats': i,
'meowing': True}})
new_node.create()
delta = time.time() - checkin
if delta > 10:
checkin = time.time()
print('* At %s nodes, %0.02f seconds. Total elapsed: %s'
% (i, delta, time.time() - start))
created = time.time()
elapse = created - start
print('Created %s nodes in %s seconds.\n' % (node_count, elapse))
def _mix_up_nodes_data():
engine = enginefacade.writer.get_engine()
conn = engine.connect()
# A list of commands to mix up indexed field data a bit to emulate what
# a production database might look like.
commands = [
"UPDATE nodes set maintenance = True where RAND() < 0.1", # noqa Easier to read this way
"UPDATE nodes set driver = 'redfish' where RAND() < 0.5", # noqa Easier to read this way
"UPDATE nodes set reservation = 'fake_conductor01' where RAND() < 0.02", # noqa Easier to read this way
"UPDATE nodes set reservation = 'fake_conductor02' where RAND() < 0.02", # noqa Easier to read this way
"UPDATE nodes set reservation = 'fake_conductor03' where RAND() < 0.02", # noqa Easier to read this way
"UPDATE nodes set reservation = 'fake_conductor04' where RAND() < 0.02", # noqa Easier to read this way
"UPDATE nodes set reservation = 'fake_conductor05' where RAND() < 0.02", # noqa Easier to read this way
"UPDATE nodes set reservation = 'fake_conductor06' where RAND() < 0.02", # noqa Easier to read this way
"UPDATE nodes set provision_state = 'active' where RAND() < 0.8", # noqa Easier to read this way
"UPDATE nodes set power_state = 'power on' where provision_state = 'active' and RAND() < 0.95", # noqa Easier to read this way
"UPDATE nodes set provision_state = 'available' where RAND() < 0.1", # noqa Easier to read this way
"UPDATE nodes set provision_state = 'manageable' where RAND() < 0.1", # noqa Easier to read this way
"UPDATE nodes set provision_state = 'clean wait' where RAND() < 0.05", # noqa Easier to read this way
"UPDATE nodes set provision_state = 'error' where RAND() < 0.05", # noqa Easier to read this way
"UPDATE nodes set owner = (select UUID()) where RAND() < 0.2", # noqa Easier to read this way
"UPDATE nodes set lessee = (select UUID()) where RAND() < 0.2", # noqa Easier to read this way
"UPDATE nodes set instance_uuid = (select UUID()) where RAND() < 0.95 and provision_state = 'active'", # noqa Easier to read this way
"UPDATE nodes set last_error = (select UUID()) where RAND() <0.05", # noqa Easier to read this way
]
start = time.time()
for command in commands:
print("Executing SQL command: \\" + command + ";\n")
conn.execute(sql.text(command))
print("* Completed command. %0.04f elapsed since start of commands."
% (time.time() - start))
def main():
service.prepare_service()
CONF.set_override('debug', False)
_create_test_nodes()
if __name__ == '__main__':
sys.exit(main())
|
apache-2.0
| -364,045,721,960,664,100
| 45.686869
| 142
| 0.618347
| false
| 3.674086
| false
| false
| false
|
CCI-MOC/GUI-Backend
|
api/v1/views/email.py
|
1
|
4618
|
"""
Atmosphere api email
"""
from rest_framework.response import Response
from rest_framework import status
from django.template.loader import render_to_string
from django.template import Context
from threepio import logger
from django_cyverse_auth.protocol.ldap import lookupEmail
from core.models import AtmosphereUser as User
from core.email import email_admin, resource_request_email
from api import failure_response
from api.v1.views.base import AuthAPIView
class Feedback(AuthAPIView):
"""
Post feedback via RESTful API
"""
def post(self, request):
"""
Creates a new feedback email and sends it to admins.
"""
required = ["message", "user-interface"]
missing_keys = check_missing_keys(request.data, required)
if missing_keys:
return keys_not_found(missing_keys)
result = self._email(request,
request.user.username,
lookupEmail(request.user.username),
request.data["message"])
return Response(result, status=status.HTTP_201_CREATED)
def _email(self, request, username, user_email, message):
"""
Sends an email to support based on feedback from a client machine
Returns a response.
"""
user = User.objects.get(username=username)
subject = 'Subject: Atmosphere Client Feedback from %s' % username
context = {
"user": user,
"feedback": message
}
body = render_to_string("core/email/feedback.html",
context=Context(context))
email_success = email_admin(request, subject, body, request_tracker=True)
if email_success:
resp = {'result':
{'code': 'success',
'meta': '',
'value': (
'Thank you for your feedback! '
'Support has been notified.')}}
else:
resp = {'result':
{'code': 'failed',
'meta': '',
'value': 'Failed to send feedback!'}}
return resp
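# --- Illustrative client-side sketch (not part of this module) ---
# Posting feedback with the two keys required by the view above. The URL and
# token are placeholders; the real route is defined in the API's url config.
#
# import requests
# requests.post('https://atmosphere.example.org/api/v1/email_feedback',
#               headers={'Authorization': 'Token <api-token>'},
#               json={'message': 'Something broke',
#                     'user-interface': 'Troposphere'})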
class QuotaEmail(AuthAPIView):
"""
Post Quota Email via RESTful API.
"""
def post(self, request):
"""
Creates a new Quota Request email and sends it to admins.
"""
required = ["quota", "reason"]
missing_keys = check_missing_keys(request.data, required)
if missing_keys:
return keys_not_found(missing_keys)
logger.debug("request.data = %s" % (str(request.data)))
result = self._email(request,
request.user.username,
request.data["quota"],
request.data["reason"])
return Response(result, status=status.HTTP_201_CREATED)
def _email(self, request, username, new_resource, reason):
"""
Processes resource request increases. Sends email to the admins
Returns a response.
"""
return resource_request_email(request, username, new_resource, reason)
class SupportEmail(AuthAPIView):
def post(self, request):
"""
Creates a new support email and sends it to admins.
Post Support Email via RESTful API
"""
required = ["message", "subject", "user-interface"]
missing_keys = check_missing_keys(request.data, required)
if missing_keys:
return keys_not_found(missing_keys)
result = self._email(request,
request.data["subject"],
request.data["message"])
return Response(result, status=status.HTTP_201_CREATED)
def _email(self, request, subject, message):
"""
Sends an email to support.
POST Params expected:
* user
* message
* subject
Returns a response.
"""
email_success = email_admin(request, subject, message, request_tracker=True)
return {"email_sent": email_success}
def check_missing_keys(data, required_keys):
"""
Return any missing required post key names.
"""
return [key for key in required_keys
# Key must exist and have a non-empty value.
if key not in data or
(isinstance(data[key], str) and len(data[key]) == 0)]
def keys_not_found(missing_keys):
return failure_response(
status.HTTP_400_BAD_REQUEST,
"Missing required POST data variables : %s" % missing_keys)
|
apache-2.0
| 1,517,545,801,638,144,800
| 30.848276
| 84
| 0.569944
| false
| 4.514174
| false
| false
| false
|
neo1691/scorer.py
|
scorer/ui.py
|
1
|
1402
|
from curses import wrapper
import curses
import logging
logger = logging.getLogger('scorer.ui')
def printGames(stdscr, matches, selected):
stdscr.clear()
stdscr.addstr(0, 0, "The following games "
"are available right now\n", curses.color_pair(1))
for index, game in enumerate(matches):
if index != selected:
stdscr.addstr(index+1, 10, game, curses.color_pair(0))
else:
stdscr.addstr(index+1, 10, game, curses.color_pair(2))
stdscr.refresh()
def main(stdscr, matches):
curses.curs_set(False)
selected = 0
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
while True:
printGames(stdscr, matches, selected)
event = stdscr.getch()
if event == ord("\n"):
logging.info("Enter key pressed")
return selected
elif event == curses.KEY_UP:
logging.info("Up key pressed")
if selected != 0:
selected -= 1
printGames(stdscr, matches, selected)
elif event == curses.KEY_DOWN:
logging.info("Down key pressed")
if selected != len(matches) - 1:
selected += 1
printGames(stdscr, matches, selected)
def getUserInput(matches):
selected = wrapper(main, matches)
return selected
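# Illustrative usage (not part of the original module): pick one entry from a
# hard-coded list of matches; the index of the highlighted entry is returned
# when Enter is pressed.
if __name__ == '__main__':
    matches = ['Team A vs Team B', 'Team C vs Team D']
    print('Selected:', matches[getUserInput(matches)])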
|
gpl-2.0
| 5,232,536,504,837,891,000
| 30.155556
| 66
| 0.601284
| false
| 3.799458
| false
| false
| false
|
dymkowsk/mantid
|
scripts/Interface/reduction_gui/reduction/inelastic/dgs_sample_data_setup_script.py
|
1
|
12591
|
#pylint: disable=invalid-name
"""
Classes for each reduction step. Those are kept separately
from the interface class so that the DgsReduction class could
be used independently of the interface implementation
"""
from __future__ import (absolute_import, division, print_function)
import os
import xml.dom.minidom
from reduction_gui.reduction.scripter import BaseScriptElement
class SampleSetupScript(BaseScriptElement):
sample_file = ""
live_button = False
output_wsname = ""
detcal_file = ""
relocate_dets = False
incident_energy_guess = ""
use_ei_guess = False
tzero_guess = 0.0
monitor1_specid = ""
monitor2_specid = ""
rebin_et = False
et_range_low = ""
et_range_width = ""
et_range_high = ""
et_is_distribution = True
hardmask_file = ""
grouping_file = ""
show_workspaces = False
savedir = ""
def __init__(self, inst_name):
super(SampleSetupScript, self).__init__()
self.set_default_pars(inst_name)
self.reset()
def set_default_pars(self, inst_name):
from Interface.reduction_gui.reduction.inelastic import dgs_utils
ip = dgs_utils.InstrumentParameters(inst_name)
SampleSetupScript.monitor1_specid = str(int(ip.get_parameter("ei-mon1-spec")))
SampleSetupScript.monitor2_specid = str(int(ip.get_parameter("ei-mon2-spec")))
def to_script(self):
script = ""
if not self.live_button:
script += "SampleInputFile=\"%s\",\n" % self.sample_file
else:
script += "SampleInputWorkspace=input,\n"
tmp_wsname = ""
if self.output_wsname == SampleSetupScript.output_wsname:
# Make a default name from the incoming file
tmp = os.path.split(os.path.splitext(str(self.sample_file))[0])[-1]
tmp_wsname = tmp + "_spe"
else:
tmp_wsname = self.output_wsname
script += "OutputWorkspace=\"%s\",\n" % tmp_wsname
if self.detcal_file != SampleSetupScript.detcal_file:
script += "DetCalFilename=\"%s\",\n" % self.detcal_file
if self.relocate_dets != SampleSetupScript.relocate_dets:
script += "RelocateDetectors=%s,\n" % self.relocate_dets
if self.incident_energy_guess != SampleSetupScript.incident_energy_guess:
script += "IncidentEnergyGuess=%s,\n" % float(self.incident_energy_guess)
if self.use_ei_guess != SampleSetupScript.use_ei_guess:
script += "UseIncidentEnergyGuess=%s,\n" % self.use_ei_guess
if self.tzero_guess != SampleSetupScript.tzero_guess:
script += "TimeZeroGuess=%s,\n" % str(self.tzero_guess)
if self.monitor1_specid != SampleSetupScript.monitor1_specid:
try:
temp1 = int(self.monitor1_specid)
script += "Monitor1SpecId=%s,\n" % temp1
except ValueError:
pass
if self.monitor2_specid != SampleSetupScript.monitor2_specid:
try:
temp2 = int(self.monitor2_specid)
script += "Monitor2SpecId=%s,\n" % temp2
except ValueError:
pass
if self.et_range_low != SampleSetupScript.et_range_low or \
self.et_range_width != SampleSetupScript.et_range_width or \
self.et_range_high != SampleSetupScript.et_range_high:
script += "EnergyTransferRange=\"%s,%s,%s\",\n" % (self.et_range_low,
self.et_range_width,
self.et_range_high)
if self.et_is_distribution != SampleSetupScript.et_is_distribution:
script += "SofPhiEIsDistribution=%s,\n" % self.et_is_distribution
if self.hardmask_file != SampleSetupScript.hardmask_file:
script += "HardMaskFile=\"%s\",\n" % self.hardmask_file
if self.grouping_file != SampleSetupScript.grouping_file:
script += "GroupingFile=\"%s\",\n" % self.grouping_file
if self.show_workspaces:
script += "ShowIntermediateWorkspaces=%s,\n" % self.show_workspaces
if self.savedir != SampleSetupScript.savedir:
script += "OutputDirectory=\"%s\",\n" % self.savedir
return script
def to_xml(self):
"""
Create XML from the current data.
"""
xml_str = "<SampleSetup>\n"
xml_str += " <sample_input_file>%s</sample_input_file>\n" % self.sample_file
xml_str += " <live_button>%s</live_button>\n" % self.live_button
xml_str += " <output_wsname>%s</output_wsname>\n" % self.output_wsname
xml_str += " <detcal_file>%s</detcal_file>\n" % self.detcal_file
xml_str += " <relocate_dets>%s</relocate_dets>\n" % self.relocate_dets
xml_str += " <incident_energy_guess>%s</incident_energy_guess>\n" % self.incident_energy_guess
xml_str += " <use_ei_guess>%s</use_ei_guess>\n" % str(self.use_ei_guess)
xml_str += " <tzero_guess>%s</tzero_guess>\n" % str(self.tzero_guess)
xml_str += " <monitor1_specid>%s</monitor1_specid>\n" % self.monitor1_specid
xml_str += " <monitor2_specid>%s</monitor2_specid>\n" % self.monitor2_specid
xml_str += " <et_range>\n"
xml_str += " <low>%s</low>\n" % self.et_range_low
xml_str += " <width>%s</width>\n" % self.et_range_width
xml_str += " <high>%s</high>\n" % self.et_range_high
xml_str += " </et_range>\n"
xml_str += " <sofphie_is_distribution>%s</sofphie_is_distribution>\n" % str(self.et_is_distribution)
xml_str += " <hardmask_file>%s</hardmask_file>\n" % self.hardmask_file
xml_str += " <grouping_file>%s</grouping_file>\n" % self.grouping_file
xml_str += " <show_workspaces>%s</show_workspaces>\n" % self.show_workspaces
xml_str += " <savedir>%s</savedir>\n" % self.savedir
xml_str += "</SampleSetup>\n"
return xml_str
def from_xml(self, xml_str):
"""
Read in data from XML
@param xml_str: text to read the data from
"""
dom = xml.dom.minidom.parseString(xml_str)
element_list = dom.getElementsByTagName("SampleSetup")
if len(element_list) > 0:
instrument_dom = element_list[0]
self.sample_file = BaseScriptElement.getStringElement(instrument_dom,
"sample_input_file",
default=SampleSetupScript.sample_file)
self.live_button = BaseScriptElement.getBoolElement(instrument_dom,
"live_button",
default=SampleSetupScript.live_button)
self.output_wsname = BaseScriptElement.getStringElement(instrument_dom,
"output_wsname",
default=SampleSetupScript.output_wsname)
self.detcal_file = BaseScriptElement.getStringElement(instrument_dom,
"detcal_file",
default=SampleSetupScript.detcal_file)
self.relocate_dets = BaseScriptElement.getBoolElement(instrument_dom,
"relocate_dets",
default=SampleSetupScript.relocate_dets)
self.incident_energy_guess = BaseScriptElement.getStringElement(instrument_dom,
"incident_energy_guess",
default=SampleSetupScript.incident_energy_guess)
self.use_ei_guess = BaseScriptElement.getBoolElement(instrument_dom,
"use_ei_guess",
default=SampleSetupScript.use_ei_guess)
self.tzero_guess = BaseScriptElement.getFloatElement(instrument_dom,
"tzero_guess",
default=SampleSetupScript.tzero_guess)
self.monitor1_specid = BaseScriptElement.getStringElement(instrument_dom,
"monitor1_specid",
default=SampleSetupScript.monitor1_specid)
self.monitor2_specid = BaseScriptElement.getStringElement(instrument_dom,
"monitor2_specid",
default=SampleSetupScript.monitor2_specid)
self.et_range_low = BaseScriptElement.getStringElement(instrument_dom,
"et_range/low",
default=SampleSetupScript.et_range_low)
self.et_range_width = BaseScriptElement.getStringElement(instrument_dom,
"et_range/width",
default=SampleSetupScript.et_range_width)
self.et_range_high = BaseScriptElement.getStringElement(instrument_dom,
"et_range/high",
default=SampleSetupScript.et_range_high)
self.et_is_distribution = BaseScriptElement.getBoolElement(instrument_dom,
"sofphie_is_distribution",
default=SampleSetupScript.et_is_distribution)
self.hardmask_file = BaseScriptElement.getStringElement(instrument_dom,
"hardmask_file",
default=SampleSetupScript.hardmask_file)
self.grouping_file = BaseScriptElement.getStringElement(instrument_dom,
"grouping_file",
default=SampleSetupScript.grouping_file)
self.show_workspaces = BaseScriptElement.getBoolElement(instrument_dom,
"show_workspaces",
default=SampleSetupScript.show_workspaces)
self.savedir = BaseScriptElement.getStringElement(instrument_dom,
"savedir",
default=SampleSetupScript.savedir)
def reset(self):
"""
Reset state
"""
self.sample_file = SampleSetupScript.sample_file
self.live_button = SampleSetupScript.live_button
self.output_wsname = SampleSetupScript.output_wsname
self.detcal_file = SampleSetupScript.detcal_file
self.relocate_dets = SampleSetupScript.relocate_dets
self.incident_energy_guess = SampleSetupScript.incident_energy_guess
self.use_ei_guess = SampleSetupScript.use_ei_guess
self.tzero_guess = SampleSetupScript.tzero_guess
self.monitor1_specid = SampleSetupScript.monitor1_specid
self.monitor2_specid = SampleSetupScript.monitor2_specid
self.rebin_et = SampleSetupScript.rebin_et
self.et_range_low = SampleSetupScript.et_range_low
self.et_range_width = SampleSetupScript.et_range_width
self.et_range_high = SampleSetupScript.et_range_high
self.et_is_distribution = SampleSetupScript.et_is_distribution
self.hardmask_file = SampleSetupScript.hardmask_file
self.grouping_file = SampleSetupScript.grouping_file
self.show_workspaces = SampleSetupScript.show_workspaces
self.savedir = SampleSetupScript.savedir
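# --- Usage sketch (not part of the original module; illustrative only) ---------
# Assumes a Mantid environment where dgs_utils recognises the instrument name
# "CNCS"; the data file path below is hypothetical.
if __name__ == "__main__":
    setup = SampleSetupScript("CNCS")
    setup.sample_file = "/data/CNCS_12345_event.nxs"
    setup.use_ei_guess = True
    setup.incident_energy_guess = "3.2"
    print(setup.to_script())          # keyword block consumed by DgsReduction
    clone = SampleSetupScript("CNCS")
    clone.from_xml(setup.to_xml())    # state survives the XML round trip
    print(clone.sample_file)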
|
gpl-3.0
| 7,636,383,482,298,824,000
| 57.562791
| 124
| 0.52339
| false
| 4.368841
| false
| false
| false
|
opencast/pyCA
|
pyca/db.py
|
1
|
6175
|
# -*- coding: utf-8 -*-
'''
pyca.db
~~~~~~~
Database specification for pyCA
'''
import json
import os.path
import string
from pyca.config import config
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, Text, LargeBinary, DateTime, \
create_engine
from sqlalchemy.orm import sessionmaker
from datetime import datetime
from functools import wraps
Base = declarative_base()
def init():
'''Initialize connection to database. Additionally the basic database
structure will be created if nonexistent.
'''
global engine
engine = create_engine(config('agent', 'database'))
Base.metadata.create_all(engine)
def get_session():
'''Get a session for database communication. If necessary a new connection
to the database will be established.
:return: Database session
'''
if 'engine' not in globals():
init()
Session = sessionmaker(bind=engine)
return Session()
def with_session(f):
"""Wrapper for f to make a SQLAlchemy session present within the function
:param f: Function to call
:type f: Function
:raises e: Possible exception of f
:return: Result of f
"""
@wraps(f)
def decorated(*args, **kwargs):
session = get_session()
try:
result = f(session, *args, **kwargs)
except Exception as e:
session.rollback()
raise e
finally:
session.close()
return result
return decorated
class Constants():
@classmethod
def str(cls, value):
'''Convert status (id) to its string name.'''
for k, v in cls.__dict__.items():
if k[0] in string.ascii_uppercase and v == value:
return k.lower().replace('_', ' ')
class Status(Constants):
'''Event status definitions
'''
UPCOMING = 1
RECORDING = 2
FAILED_RECORDING = 3
FINISHED_RECORDING = 4
UPLOADING = 5
FAILED_UPLOADING = 6
FINISHED_UPLOADING = 7
PARTIAL_RECORDING = 8
PAUSED_AFTER_RECORDING = 9
class ServiceStatus(Constants):
'''Service status type definitions
'''
STOPPED = 1
IDLE = 2
BUSY = 3
class Service(Constants):
'''Service type definitions
'''
AGENTSTATE = 1
CAPTURE = 2
INGEST = 3
SCHEDULE = 4
# Database Schema Definition
class BaseEvent():
'''Database definition of an event.'''
__tablename__ = 'event'
uid = Column('uid', Text(), nullable=False, primary_key=True)
start = Column('start', Integer(), primary_key=True)
end = Column('end', Integer(), nullable=False)
title = Column('title', Text())
data = Column('data', LargeBinary(), nullable=False)
status = Column('status', Integer(), nullable=False,
default=Status.UPCOMING)
tracks = Column('tracks', LargeBinary(), nullable=True)
def get_data(self):
'''Load JSON data from event.
'''
return json.loads(self.data.decode('utf-8'))
def set_data(self, data):
'''Store data as JSON.
'''
# Python 3 wants bytes
self.data = json.dumps(data).encode('utf-8')
def name(self):
'''Returns the filesystem name of this event.
'''
return 'recording-%i-%s' % (self.start, self.uid)
def directory(self):
'''Returns recording directory of this event.
'''
return os.path.join(config('capture', 'directory'), self.name())
def remaining_duration(self, time):
'''Returns the remaining duration for a recording.
'''
return max(0, self.end - max(self.start, time))
def status_str(self):
'''Return status as string.
'''
return Status.str(self.status)
def get_tracks(self):
'''Load JSON track data from event.
'''
if not self.tracks:
return []
return json.loads(self.tracks.decode('utf-8'))
def set_tracks(self, tracks):
'''Store track data as JSON.
'''
self.tracks = json.dumps(tracks).encode('utf-8')
def __repr__(self):
        '''Return a string representation of an event object.
:return: String representation of object.
'''
return '<Event(start=%i, uid="%s")>' % (self.start, self.uid)
def serialize(self):
'''Serialize this object as dictionary usable for conversion to JSON.
:return: Dictionary representing this object.
'''
return {
'type': 'event',
'id': self.uid,
'attributes': {
'start': self.start,
'end': self.end,
'uid': self.uid,
'title': self.title,
'data': self.get_data(),
'status': Status.str(self.status)
}
}
class UpcomingEvent(Base, BaseEvent):
'''List of upcoming events'''
__tablename__ = 'upcoming_event'
class RecordedEvent(Base, BaseEvent):
'''List of events pyca tried to record.'''
__tablename__ = 'recorded_event'
def __init__(self, event=None):
if event:
self.uid = event.uid
self.start = event.start
self.end = event.end
self.title = event.title
self.data = event.data
self.status = event.status
class ServiceStates(Base):
'''List of internal service states.'''
__tablename__ = 'service_states'
type = Column('type', Integer(), nullable=False, primary_key=True)
status = Column('status', Integer(), nullable=False,
default=ServiceStatus.STOPPED)
def __init__(self, service=None):
if service:
self.type = service.type
self.status = service.status
class UpstreamState(Base):
'''State of the upstream Opencast server.'''
__tablename__ = 'upstream_state'
url = Column('url', Text(), primary_key=True)
last_synced = Column('last_synced', DateTime())
@staticmethod
def update_sync_time(url):
s = get_session()
s.merge(UpstreamState(url=url, last_synced=datetime.utcnow()))
s.commit()
s.close()
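# --- Usage sketch (not part of the original module; illustrative only) ---------
# with_session() hides the session lifecycle: the wrapped function receives the
# session as its first argument and callers omit it. Assumes config('agent',
# 'database') points at a reachable database URL.
@with_session
def count_upcoming(session):
    '''Return the number of events still waiting to be recorded.'''
    return session.query(UpcomingEvent).filter(
        UpcomingEvent.status == Status.UPCOMING).count()

if __name__ == '__main__':
    print('%d upcoming event(s)' % count_upcoming())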
|
lgpl-3.0
| 1,113,225,229,775,429,800
| 25.050633
| 78
| 0.587302
| false
| 4.127005
| false
| false
| false
|
vhb/dotfiles
|
vim/ycm_extra_conf.py
|
1
|
2505
|
import os
import ycm_core
from clang_helpers import PrepareClangFlags
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# These are the compilation flags that will be used in case there's no
# compilation database set.
flags = [
'-Wall',
'-W',
'-Wextra',
'-std=c++11',
'-stdlib=libc++',
'-x',
'c++',
'-I',
'.',
'-I',
'/usr/include/c++/4.2.1/'
]
if compilation_database_folder:
database = ycm_core.CompilationDatabase(compilation_database_folder)
else:
database = None
def DirectoryOfThisScript():
return os.path.dirname(os.path.abspath(__file__))
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
if not working_directory:
return flags
new_flags = []
make_next_absolute = False
path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith('/'):
new_flag = os.path.join(working_directory, flag)
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith(path_flag):
path = flag[len(path_flag):]
new_flag = path_flag + os.path.join(working_directory, path)
break
if new_flag:
new_flags.append(new_flag)
return new_flags
def FlagsForFile(filename):
if database:
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object
compilation_info = database.GetCompilationInfoForFile(filename)
final_flags = PrepareClangFlags(
MakeRelativePathsInFlagsAbsolute(
compilation_info.compiler_flags_,
compilation_info.compiler_working_dir_),
filename)
else:
relative_to = DirectoryOfThisScript()
final_flags = MakeRelativePathsInFlagsAbsolute(flags, relative_to)
return {
'flags': final_flags,
'do_cache': True}
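# --- Usage sketch (not part of the original file; illustrative only) -----------
# YouCompleteMe imports this module and calls FlagsForFile() per buffer; the
# call below exercises only the fallback path (no compilation database) and
# uses a hypothetical source path.
if __name__ == '__main__':
    result = FlagsForFile('/home/user/project/src/main.cpp')
    print(' '.join(result['flags']))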
|
mit
| -1,765,271,193,283,489,800
| 28.821429
| 79
| 0.625948
| false
| 3.951104
| false
| false
| false
|
Lenchik13/Testing
|
test/test_edit_contact.py
|
1
|
1223
|
from model.contact import Contact
import random
def test_edit_contact(app, db, check_ui):
app.open_home_page()
if app.contact.count() == 0:
app.contact.create(Contact(firstname="Contact", lastname="", nickname="",
address="", company="", home="",
mobile="", work="", fax="", email="",
email2="", email3="", homepage="",
byear="", address2="", phone2="",
notes="", bday="20", bmonth="6"))
old_contacts = db.get_contact_list()
rcontact = random.choice(old_contacts)
contact = Contact(lastname="lname", firstname="fname", address="address")
contact.id = rcontact.id
app.contact.modify_contact_by_id(contact)
app.open_home_page()
assert len(old_contacts) == app.contact.count()
new_contacts = db.get_contact_list()
old_contacts.remove(rcontact)
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
if check_ui:
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
|
apache-2.0
| -8,770,590,787,177,672,000
| 42.678571
| 123
| 0.58381
| false
| 3.821875
| false
| false
| false
|
igorcoding/forum-api
|
api/api_helpers/common_helper.py
|
1
|
1533
|
import json
def required(param_list, args):
for param in param_list:
if type(param) != str:
raise Exception("param must be a string value")
if param not in args:
raise Exception("%s is required." % (param,))
def semi_required(param_variations, args):
atleast = False
all = True
for param in param_variations:
arg = param in args
atleast = atleast or arg
all = all and arg
if all:
raise Exception("All variations cannot be in one request simultaneously")
if not atleast:
raise Exception("None of variations is in the arguments list")
def optional(param, args, default=None, possible_values=None):
if param not in args:
args[param] = default
try:
args[param] = json.loads(args[param], encoding='utf-8')
except:
args[param] = args[param]
def check_arg(arg, values):
if arg not in values:
raise Exception("%s not in %s" % (arg, values))
if type(args[param]) == list and type(possible_values) == list:
for arg in args[param]:
check_arg(arg, possible_values)
if type(args[param]) != list and type(possible_values) == list:
check_arg(args[param], possible_values)
def make_boolean(params, arr):
for param in params:
arr[param] = bool(arr[param])
def check_empty(res, message):
if not res or len(res) == 0:
raise Exception(message)
def date_to_str(date):
return date.strftime("%Y-%m-%d %H:%M:%S")
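# --- Usage sketch (not part of the original module; illustrative only) ---------
# Hypothetical request arguments: values arrive JSON-encoded and optional()
# normalises them in place.
if __name__ == '__main__':
    args = {'forum': '"how-do-i-start"', 'order': '"desc"'}
    required(['forum'], args)                  # raises if 'forum' is missing
    semi_required(['forum', 'thread'], args)   # at least one, but not all
    optional('order', args, default='desc', possible_values=['asc', 'desc'])
    optional('limit', args, default=10)
    print(args['order'], args['limit'])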
|
mit
| 8,430,216,498,991,779,000
| 25.448276
| 81
| 0.609915
| false
| 3.720874
| false
| false
| false
|
hfalcic/PyKCS11
|
samples/dumpit.py
|
1
|
10079
|
#!/usr/bin/env python
# Copyright (C) 2006-2008 Ludovic Rousseau (ludovic.rousseau@free.fr)
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from __future__ import print_function
import PyKCS11
import binascii
import getopt
import sys
import platform
# from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/142812
# Title: Hex dumper
# Submitter: Sebastien Keim (other recipes)
# Last Updated: 2002/08/05
# Version no: 1.0
def hexx(intval):
x = hex(intval)[2:]
if (x[-1:].upper() == 'L'):
x = x[:-1]
if len(x) % 2 != 0:
return "0%s" % x
return x
def dump(src, length=8):
FILTER = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.' for x in range(256)])
N = 0
result = ''
while src:
s, src = src[:length], src[length:]
hexa = ' '.join(["%02X" % ord(x) for x in s])
s = s.translate(FILTER)
result += "%04X %-*s %s\n" % (N, length * 3, hexa, s)
N += length
return result
def usage():
print("Usage:", sys.argv[0], end=' ')
print("[-p pin][--pin=pin] (use --pin=NULL for pinpad)", end=' ')
print("[-c lib][--lib=lib]", end=' ')
print("[-S][--sign]", end=' ')
print("[-d][--decrypt]", end=' ')
print("[-h][--help]", end=' ')
try:
opts, args = getopt.getopt(sys.argv[1:], "p:c:Sd:h", ["pin=", "lib=", "sign", "decrypt", "help"])
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
pin_available = False
decrypt = sign = False
lib = None
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-p", "--pin"):
pin = a
if pin == "NULL":
pin = None
pin_available = True
elif o in ("-c", "--lib"):
lib = a
print("using PKCS11 lib:", lib)
elif o in ("-S", "--sign"):
sign = True
elif o in ("-d", "--decrypt"):
decrypt = True
red = blue = magenta = normal = ""
if sys.stdout.isatty() and platform.system().lower() != 'windows':
red = "\x1b[01;31m"
blue = "\x1b[34m"
magenta = "\x1b[35m"
normal = "\x1b[0m"
format_long = magenta + " %s:" + blue + " %s (%s)" + normal
format_binary = magenta + " %s:" + blue + " %d bytes" + normal
format_normal = magenta + " %s:" + blue + " %s" + normal
pkcs11 = PyKCS11.PyKCS11Lib()
pkcs11.load(lib)
info = pkcs11.getInfo()
print("Library manufacturerID:", info.manufacturerID)
slots = pkcs11.getSlotList()
print("Available Slots:", len(slots))
for s in slots:
try:
i = pkcs11.getSlotInfo(s)
print("Slot no:", s)
print(format_normal % ("slotDescription", i.slotDescription.strip()))
print(format_normal % ("manufacturerID", i.manufacturerID.strip()))
t = pkcs11.getTokenInfo(s)
print("TokenInfo")
print(format_normal % ("label", t.label.strip()))
print(format_normal % ("manufacturerID", t.manufacturerID.strip()))
print(format_normal % ("model", t.model.strip()))
session = pkcs11.openSession(s)
print("Opened session 0x%08X" % session.session.value())
if pin_available:
try:
session.login(pin=pin)
except:
print("login failed, exception:", str(sys.exc_info()[1]))
objects = session.findObjects()
print()
print("Found %d objects: %s" % (len(objects), [x.value() for x in objects]))
all_attributes = list(PyKCS11.CKA.keys())
# remove the CKR_ATTRIBUTE_SENSITIVE attributes since we can't get
# their values and will get an exception instead
all_attributes.remove(PyKCS11.CKA_PRIVATE_EXPONENT)
all_attributes.remove(PyKCS11.CKA_PRIME_1)
all_attributes.remove(PyKCS11.CKA_PRIME_2)
all_attributes.remove(PyKCS11.CKA_EXPONENT_1)
all_attributes.remove(PyKCS11.CKA_EXPONENT_2)
all_attributes.remove(PyKCS11.CKA_COEFFICIENT)
# only use the integer values and not the strings like 'CKM_RSA_PKCS'
all_attributes = [e for e in all_attributes if isinstance(e, int)]
for o in objects:
print()
print((red + "==================== Object: %d ====================" + normal) % o.value())
attributes = session.getAttributeValue(o, all_attributes)
attrDict = dict(list(zip(all_attributes, attributes)))
if attrDict[PyKCS11.CKA_CLASS] == PyKCS11.CKO_PRIVATE_KEY \
and attrDict[PyKCS11.CKA_KEY_TYPE] == PyKCS11.CKK_RSA:
m = attrDict[PyKCS11.CKA_MODULUS]
e = attrDict[PyKCS11.CKA_PUBLIC_EXPONENT]
if m and e:
mx = eval(b'0x' + binascii.hexlify(''.join(chr(c) for c in m).encode('ascii')))
ex = eval(b'0x' + binascii.hexlify(''.join(chr(c) for c in e).encode('ascii')))
if sign:
try:
toSign = b"12345678901234567890" # 20 bytes, SHA1 digest
print("* Signing with object 0x%08X following data: %s" % (o.value(), toSign))
signature = session.sign(o, toSign)
s = binascii.hexlify(''.join(chr(c) for c in signature).encode('ascii'))
sx = eval(b'0x' + s)
print("Signature:")
print(dump(''.join(map(chr, signature)), 16))
if m and e:
print("Verifying using following public key:")
print("Modulus:")
print(dump(''.join(map(chr, m)), 16))
print("Exponent:")
print(dump(''.join(map(chr, e)), 16))
decrypted = pow(sx, ex, mx) # RSA
print("Decrypted:")
d = binascii.unhexlify(hexx(decrypted))
print(dump(d, 16))
if toSign == d[-20:]:
print("*** signature VERIFIED!\n")
else:
print("*** signature NOT VERIFIED; decrypted value:")
print(hex(decrypted), "\n")
else:
print("Unable to verify signature: MODULUS/PUBLIC_EXP not found")
except:
print("Sign failed, exception:", str(sys.exc_info()[1]))
if decrypt:
if m and e:
try:
toEncrypt = "12345678901234567890"
# note: PKCS1 BT2 padding should be random data,
# but this is just a test and we use 0xFF...
padded = "\x00\x02%s\x00%s" % ("\xFF" * (128 - (len(toEncrypt)) - 3), toEncrypt)
padded = padded.encode('latin-1')
print("* Decrypting with 0x%08X following data: %s" % (o.value(), toEncrypt))
print("padded:\n", dump(padded, 16))
encrypted = pow(eval('0x%sL' % binascii.hexlify(padded)), ex, mx) # RSA
encrypted1 = binascii.unhexlify(hexx(encrypted))
print("encrypted:\n", dump(encrypted1, 16))
decrypted = session.decrypt(o, encrypted1)
decrypted1 = ''.join(chr(i) for i in decrypted)
print("decrypted:\n", dump(decrypted1, 16))
if decrypted1 == toEncrypt:
print("decryption SUCCESSFULL!\n")
else:
print("decryption FAILED!\n")
except:
print("Decrypt failed, exception:", str(sys.exc_info()[1]))
else:
print("ERROR: Private key don't have MODULUS/PUBLIC_EXP")
print("Dumping attributes:")
for q, a in zip(all_attributes, attributes):
if a == None:
# undefined (CKR_ATTRIBUTE_TYPE_INVALID) attribute
continue
if q == PyKCS11.CKA_CLASS:
print(format_long % (PyKCS11.CKA[q], PyKCS11.CKO[a], a))
elif q == PyKCS11.CKA_CERTIFICATE_TYPE:
print(format_long % (PyKCS11.CKA[q], PyKCS11.CKC[a], a))
elif q == PyKCS11.CKA_KEY_TYPE:
print(format_long % (PyKCS11.CKA[q], PyKCS11.CKK[a], a))
elif session.isBin(q):
print(format_binary % (PyKCS11.CKA[q], len(a)))
if a:
print(dump(''.join(map(chr, a)), 16), end=' ')
elif q == PyKCS11.CKA_SERIAL_NUMBER:
print(format_binary % (PyKCS11.CKA[q], len(a)))
if a:
print(dump(a, 16), end=' ')
else:
print(format_normal % (PyKCS11.CKA[q], a))
print()
if pin_available:
try:
session.logout()
except:
print("logout failed, exception:", str(sys.exc_info()[1]))
session.closeSession()
except PyKCS11.PyKCS11Error as e:
print("Error:", e)
|
gpl-2.0
| 4,655,479,188,176,520,000
| 40.477366
| 108
| 0.509574
| false
| 3.765035
| false
| false
| false
|
cineuse/CNCGToolKit
|
cgtkLibs/cgtk_os/delete_folder.py
|
1
|
1091
|
# coding=utf8
# Copyright (c) 2016 CineUse
import os
import shutil
import logging
import cgtk_log
log = cgtk_log.cgtk_log(level=logging.INFO)
def delete_folder(src):
"""
Deletes all files from inside a folder
.. warning::
This will delete all files in the folder specified
Args:
src (basestring): directory to clean
"""
if os.path.isfile(src):
try:
os.remove(src)
log.info(src)
except IOError:
pass
elif os.path.isdir(src):
try:
shutil.rmtree(src)
log.info(src)
except IOError:
for roots, dirs, files in os.walk(src):
for d in dirs:
itemsrc = os.path.join(roots, d)
for f in os.listdir(itemsrc):
itemfile = os.path.join(itemsrc, f)
try:
delete_folder(itemfile)
except IOError:
pass
if __name__ == "__main__":
delete_folder(r"E:\temp\needclear")
|
mit
| 3,809,987,850,486,803,500
| 22.717391
| 59
| 0.499542
| false
| 4.148289
| false
| false
| false
|
LinkCareServices/cairotft
|
cairotft/widgets/base.py
|
1
|
5884
|
# Copyright (c) 2015, Thomas Chiroux - Link Care Services
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of cairotft nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""base widget class."""
class BaseWidget():
"""Base class for all widgets.
:ivar display_object: (:class:`cairotft.tft.TftDisplay`) The display
object the widget will display itself.
:ivar pos_x: (:py:class:`int`) x coordinates to display the widget
:ivar pos_y: (:py:class:`int`) y coordinates to display the widget
:ivar width: (:py:class:`int`) the width of the widget
:ivar height: (:py:class:`int`) the height of the widget
"""
def __init__(self, display_object,
pos_x, pos_y, width, height):
"""Initialisation of the base widget.
        :param display_object: the Display class instantiation.
        :type display_object: :class:`cairotft.tft.TftDisplay`
:param int pos_x: x coordinates to display the widget
:param int pos_y: y coordinates to display the widget
:param int width: the width of the widget
:param int height: the height of the widget
"""
self.display_object = display_object
self.pos_x = pos_x
self.pos_y = pos_y
self.width = width
self.height = height
self._stop = False
self._showing = False
def draw(self, ctx):
"""draw the widget.
implement this method in your subclasses
"""
raise NotImplementedError
def show(self, ctx):
"""show the icon."""
# here call the draw method (which includes the eventual blit)
self.draw(ctx)
def start(self, ctx):
"""Start showing the widget."""
self.display_object.loop.call_soon(
self.show, ctx)
def stop(self):
"""stop showing the widget."""
pass
class BaseAnimatedWidget(BaseWidget):
"""Base class for all Animated widgets.
see :class:`BaseWidget` for All BaseWidget variables
:ivar float interval_time: (:py:class:`float`) interval between
two frames (in seconds)
TODO: add transition support in BaseAnimatedWidget
"""
def __init__(self, display_object,
pos_x, pos_y, width, height,
interval_time=None):
"""Initialisation of the base animated widget.
        :param display_object: the Display class instantiation.
        :type display_object: :class:`cairotft.tft.TftDisplay`
:param int pos_x: x coordinates to display the widget
:param int pos_y: y coordinates to display the widget
:param int width: the width of the widget
:param int height: the height of the widget
:param float interval_time: interval between two frames (in seconds)
the widget will first:
try to use the fps parameter to calculates a display interval
or: use the given interval_time
or: fix an interval time of 1second
"""
super().__init__(display_object, pos_x, pos_y, width, height)
if self.display_object.fps is not None and interval_time is not None:
self.interval_time = max(interval_time,
1 / self.display_object.fps)
elif self.display_object.fps is not None and interval_time is None:
self.interval_time = 1 / self.display_object.fps
elif self.display_object.fps is None and interval_time is not None:
self.interval_time = interval_time
else:
self.interval_time = 1
self._stop = False
self._showing = False
def draw(self, ctx):
"""draw the widget.
implement this method in your subclasses
"""
raise NotImplementedError
def show(self, ctx):
"""show the icon."""
if not self._stop:
# here call the draw method (which includes the eventual blit)
self._showing = True
self.draw(ctx)
# the call the next show
self.display_object.loop.call_later(
self.interval_time, self.show, ctx)
def start(self, ctx):
"""Start showing the widget."""
if not self._showing:
self._showing = True
self._stop = False
self.display_object.loop.call_soon(
self.show, ctx)
def stop(self):
"""stop showing the widget."""
self._stop = True
self._showing = False
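# --- Subclass sketch (not part of cairotft; illustrative only) -----------------
# The mandatory extension point is draw(), which receives a cairo context and
# paints the widget area.
class FilledRectWidget(BaseWidget):
    """Paint a solid rectangle covering the widget area."""

    def draw(self, ctx):
        """Fill the widget's bounding box with a single colour."""
        ctx.set_source_rgb(0.2, 0.4, 0.8)
        ctx.rectangle(self.pos_x, self.pos_y, self.width, self.height)
        ctx.fill()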
|
bsd-3-clause
| 4,971,206,907,318,229
| 36.240506
| 79
| 0.64344
| false
| 4.355292
| false
| false
| false
|
ouh-churchill/quod
|
config/settings/staging.py
|
1
|
3522
|
#!/usr/bin/python
# coding: utf-8
from __future__ import absolute_import, unicode_literals
'''
Local settings
- Use djangosecure
'''
from .common import * # noqa
print("DEBUG: Loading settings from staging")
# Because we're behind a reverse proxy, pay attention to where the request is coming from
USE_X_FORWARDED_HOST = True
FORCE_SCRIPT_NAME = env('FORCE_SCRIPT_NAME', default='/quod/')
# django-secure
# ------------------------------------------------------------------------------
# INSTALLED_APPS += ["djangosecure", ]
# SECURITY_MIDDLEWARE = [
# 'djangosecure.middleware.SecurityMiddleware',
# ]
# MIDDLEWARE = SECURITY_MIDDLEWARE + MIDDLEWARE
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool("DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool("DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
# ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com']) -- In Common.py
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# END SITE CONFIGURATION
INSTALLED_APPS += ["gunicorn", ]
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = env('DJANGO_EMAIL_HOST', default='localhost')
EMAIL_PORT = 25
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL', default='QUODsite <noreply@dev.nds.ox.ac.uk>')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[QUODsite] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(asctime)s %(levelname)s [%(name)s:%(lineno)s] %(module)s %(process)d %(thread)d %(message)s'
}
},
'handlers': {
'gunicorn': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'verbose',
'filename': env('GUNICORN_ERRORS_LOGFILE', default='/tmp/quod.gunicorn.errors'),
'maxBytes': 1024 * 1024 * 100, # 100 mb
}
},
'loggers': {
'gunicorn.errors': {
'level': 'DEBUG',
'handlers': ['gunicorn'],
'propagate': True,
},
}
}
|
mit
| -4,037,620,605,904,640,000
| 33.871287
| 117
| 0.592561
| false
| 3.71519
| false
| false
| false
|
trevor/calendarserver
|
calendarserver/tools/push.py
|
1
|
4102
|
#!/usr/bin/env python
##
# Copyright (c) 2012-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
from calendarserver.tools.cmdline import utilityMain, WorkerService
from argparse import ArgumentParser
from twext.python.log import Logger
from twisted.internet.defer import inlineCallbacks
from twext.who.idirectory import RecordType
import time
log = Logger()
class DisplayAPNSubscriptions(WorkerService):
users = []
def doWork(self):
rootResource = self.rootResource()
directory = rootResource.getDirectory()
return displayAPNSubscriptions(self.store, directory, rootResource,
self.users)
def main():
parser = ArgumentParser(description='Display Apple Push Notification subscriptions')
parser.add_argument('-f', '--config', dest='configFileName', metavar='CONFIGFILE', help='caldavd.plist configuration file path')
parser.add_argument('-d', '--debug', action='store_true', help='show debug logging')
parser.add_argument('user', help='one or more users to display', nargs='+') # Required
args = parser.parse_args()
DisplayAPNSubscriptions.users = args.user
utilityMain(
args.configFileName,
DisplayAPNSubscriptions,
verbose=args.debug,
)
@inlineCallbacks
def displayAPNSubscriptions(store, directory, root, users):
for user in users:
        print()
record = yield directory.recordWithShortName(RecordType.user, user)
if record is not None:
print("User %s (%s)..." % (user, record.uid))
txn = store.newTransaction(label="Display APN Subscriptions")
subscriptions = (yield txn.apnSubscriptionsBySubscriber(record.uid))
(yield txn.commit())
if subscriptions:
byKey = {}
for token, key, timestamp, userAgent, ipAddr in subscriptions:
byKey.setdefault(key, []).append((token, timestamp, userAgent, ipAddr))
for key, tokens in byKey.iteritems():
                    print()
protocol, _ignore_host, path = key.strip("/").split("/", 2)
resource = {
"CalDAV": "calendar",
"CardDAV": "addressbook",
}[protocol]
if "/" in path:
uid, collection = path.split("/")
else:
uid = path
collection = None
record = yield directory.recordWithUID(uid)
user = record.shortNames[0]
if collection:
print("...is subscribed to a share from %s's %s home" % (user, resource),)
else:
print("...is subscribed to %s's %s home" % (user, resource),)
# print(" (key: %s)\n" % (key,))
print("with %d device(s):" % (len(tokens),))
for token, timestamp, userAgent, ipAddr in tokens:
print(" %s\n '%s' from %s\n %s" % (
token, userAgent, ipAddr,
time.strftime(
"on %a, %d %b %Y at %H:%M:%S %z(%Z)",
time.localtime(timestamp)
)
))
else:
print(" ...is not subscribed to anything.")
else:
print("User %s not found" % (user,))
|
apache-2.0
| 8,099,020,124,651,099,000
| 38.442308
| 132
| 0.56314
| false
| 4.552719
| false
| false
| false
|
frigg/frigg-common
|
frigg/projects.py
|
1
|
1163
|
# -*- coding: utf8 -*-
import logging
from os import listdir
from os.path import exists, isfile, join
import yaml
from .helpers import detect_test_runners
logger = logging.getLogger(__name__)
def build_tasks(directory):
try:
files = [f for f in listdir(directory) if isfile(join(directory, f))]
except OSError as e:
files = []
logger.error('Could not read files in path {}: \n {}'.format(directory, e))
return detect_test_runners(files)
def load_settings_file(path):
with open(path) as f:
return yaml.load(f)
def get_path_of_settings_file(directory):
if exists(join(directory, '.frigg.yml')):
return join(directory, '.frigg.yml')
elif exists(join(directory, '.frigg.yaml')):
return join(directory, '.frigg.yaml')
def build_settings(directory):
path = get_path_of_settings_file(directory)
settings = {
'webhooks': [],
}
if path is not None:
settings.update(load_settings_file(path))
else:
settings['tasks'] = build_tasks(directory)
if len(settings['tasks']) == 0:
raise RuntimeError('No tasks found')
return settings
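# --- Usage sketch (not part of the original module; illustrative only) ---------
# Hypothetical checkout path: an existing .frigg.yml/.frigg.yaml wins, otherwise
# tasks are auto-detected (RuntimeError if none are found).
if __name__ == '__main__':
    settings = build_settings('/tmp/checkout-of-some-project')
    print(settings['tasks'])
    print(settings['webhooks'])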
|
mit
| 6,964,887,796,134,280,000
| 22.734694
| 83
| 0.638865
| false
| 3.703822
| false
| false
| false
|
jethrogb/episoder
|
pyepisoder/episoder.py
|
1
|
6798
|
# episoder, https://code.ott.net/episoder
#
# Copyright (C) 2004-2020 Stefan Ott. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import logging
from datetime import date, timedelta
import sqlite3
from sqlalchemy import Table, MetaData, create_engine, or_, and_
from sqlalchemy.orm import create_session
from .database import Episode, Show, Meta
class Database(object):
def __init__(self, path):
self._path = path
self.logger = logging.getLogger("Database")
self.open()
self._initdb()
def __str__(self):
return "Episoder Database at %s" % self._path
def __repr__(self):
return "Database(%s)" % self._path
def _initdb(self):
# Initialize the database if all tables are missing
tables = [Show, Episode, Meta]
tables = map(lambda x: x.__table__.exists, tables)
found = [x for x in tables if x(bind=self.engine)]
if len(found) < 1:
Show.__table__.create(bind=self.engine)
Episode.__table__.create(bind=self.engine)
Meta.__table__.create(bind=self.engine)
self.set_schema_version(4)
def open(self):
if self._path.find("://") > -1:
self.engine = create_engine(self._path,
convert_unicode=True)
else:
self.engine = create_engine("sqlite:///%s" % self._path)
self.conn = self.engine.connect()
self.metadata = MetaData()
self.metadata.bind = self.engine
self.session = create_session(bind=self.engine)
self.session.begin()
def close(self):
self.session.commit()
self.session.close()
self.conn.close()
self.engine.dispose()
def set_schema_version(self, version):
meta = Meta()
meta.key = "schema"
meta.value = "%d" % version
self.session.merge(meta)
self.session.flush()
def get_schema_version(self):
if not Meta.__table__.exists(bind=self.engine):
return 1
res = self.session.query(Meta).get("schema")
if res:
return int(res.value)
return 0
def clear(self):
episodes = self.session.query(Episode).all()
for episode in episodes:
self.session.delete(episode)
self.session.flush()
def migrate(self):
schema_version = self.get_schema_version()
self.logger.debug("Found schema version %s", schema_version)
if schema_version < 0:
self.logger.debug("Automatic schema updates disabled")
return
if schema_version == 1:
# Upgrades from version 1 are rather harsh, we
# simply drop and re-create the tables
self.logger.debug("Upgrading to schema version 2")
table = Table("episodes", self.metadata, autoload=True)
table.drop()
table = Table("shows", self.metadata, autoload=True)
table.drop()
Show.__table__.create(bind=self.engine)
Episode.__table__.create(bind=self.engine)
Meta.__table__.create(bind=self.engine)
schema_version = 4
self.set_schema_version(schema_version)
if schema_version == 2:
# Add two new columns to the shows table
self.logger.debug("Upgrading to schema version 3")
# We can only do this with sqlite databases
assert self.engine.driver == "pysqlite"
self.close()
upgrade = sqlite3.connect(self._path)
upgrade.execute("ALTER TABLE shows "
"ADD COLUMN enabled TYPE boolean")
upgrade.execute("ALTER TABLE shows "
"ADD COLUMN status TYPE integer")
upgrade.close()
self.open()
schema_version = 3
self.set_schema_version(schema_version)
if schema_version == 3:
# Add a new column to the episodes table
self.logger.debug("Upgrading to schema version 4")
# We can only do this with sqlite databases
assert self.engine.driver == "pysqlite"
self.close()
upgrade = sqlite3.connect(self._path)
upgrade.execute("ALTER TABLE episodes "
"ADD COLUMN notified TYPE date")
upgrade.close()
self.open()
schema_version = 4
self.set_schema_version(schema_version)
def get_expired_shows(self, today=date.today()):
delta_running = timedelta(2) # 2 days
delta_suspended = timedelta(7) # 1 week
delta_ended = timedelta(14) # 2 weeks
shows = self.session.query(Show).filter(or_(
and_(
Show.enabled,
Show.status == Show.RUNNING,
Show.updated < today - delta_running
),
and_(
Show.enabled,
Show.status == Show.SUSPENDED,
Show.updated < today - delta_suspended
),
and_(
Show.enabled,
Show.status == Show.ENDED,
Show.updated < today - delta_ended
)
))
return shows.all()
def get_enabled_shows(self):
shows = self.session.query(Show).filter(Show.enabled)
return shows.all()
def get_show_by_url(self, url):
shows = self.session.query(Show).filter(Show.url == url)
if shows.count() < 1:
return None
return shows.first()
def get_show_by_id(self, show_id):
return self.session.query(Show).get(show_id)
def add_show(self, show):
show = self.session.merge(show)
self.session.flush()
return show
def remove_show(self, show_id):
show = self.session.query(Show).get(show_id)
if not show:
self.logger.error("No such show")
return
episodes = self.session.query(Episode)
for episode in episodes.filter(Episode.show_id == show.id):
self.session.delete(episode)
self.session.delete(show)
self.session.flush()
def get_shows(self):
return self.session.query(Show).all()
def add_episode(self, episode, show):
episode.show_id = show.id
self.session.merge(episode)
self.session.flush()
def get_episodes(self, basedate=date.today(), n_days=0):
enddate = basedate + timedelta(n_days)
return self.session.query(Episode).\
filter(Episode.airdate >= basedate). \
filter(Episode.airdate <= enddate). \
order_by(Episode.airdate).all()
def search(self, search):
return self.session.query(Episode).\
filter(or_( \
Episode.title.like("%%%s%%" % search),
Show.name.like("%%%s%%" % search))). \
order_by(Episode.airdate).all()
def commit(self):
self.session.commit()
self.session.begin()
def rollback(self):
self.session.rollback()
self.session.begin()
def remove_before(self, then, show=None):
eps = self.session.query(Episode).filter(Episode.airdate < then)
if show:
eps = eps.filter(Episode.show == show)
for episode in eps:
self.session.delete(episode)
self.commit()
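# --- Usage sketch (not part of the original module; illustrative only) ---------
# The episoder CLI normally drives this class; the database path below is
# hypothetical and Episode.airdate/Episode.title come from .database.
if __name__ == "__main__":
    db = Database("/tmp/episodes.db")
    db.migrate()
    for episode in db.get_episodes(basedate=date.today(), n_days=7):
        print("%s  %s" % (episode.airdate, episode.title))
    db.close()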
|
gpl-3.0
| -8,041,760,182,362,835,000
| 22.201365
| 71
| 0.682848
| false
| 3.215705
| false
| false
| false
|