| repo_name (string, 5–92 chars) | path (string, 4–221 chars) | copies (string, 19 classes) | size (string, 4–6 chars) | content (string, 766–896k chars) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
angr/angr
|
angr/analyses/decompiler/optimization_passes/mod_simplifier.py
|
1
|
2880
|
import logging
from ailment import Expr
from ... import AnalysesHub
from .engine_base import SimplifierAILEngine, SimplifierAILState
from .optimization_pass import OptimizationPass, OptimizationPassStage
_l = logging.getLogger(name=__name__)
class ModSimplifierAILEngine(SimplifierAILEngine):
def _ail_handle_Sub(self, expr):
operand_0 = self._expr(expr.operands[0])
operand_1 = self._expr(expr.operands[1])
x_0, c_0, x_1, c_1 = None, None, None, None
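        # Match the pattern x - (x / c) * c (the division may be wrapped in a
        # Convert); when both constants agree, this is equivalent to x % c.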
if isinstance(operand_1, Expr.BinaryOp) \
and isinstance(operand_1.operands[1], Expr.Const) \
and operand_1.op == 'Mul':
if isinstance(operand_1.operands[0], Expr.BinaryOp) \
and isinstance(operand_1.operands[0].operands[1], Expr.Const) \
and operand_1.operands[0].op in ['Div', 'DivMod']:
x_0 = operand_1.operands[0].operands[0]
x_1 = operand_0
c_0 = operand_1.operands[1]
c_1 = operand_1.operands[0].operands[1]
elif isinstance(operand_1.operands[0], Expr.Convert) \
and isinstance(operand_1.operands[0].operand, Expr.BinaryOp) \
and operand_1.operands[0].operand.op in ['Div', 'DivMod']:
x_0 = operand_1.operands[0].operand.operands[0]
x_1 = operand_0
c_0 = operand_1.operands[1]
c_1 = operand_1.operands[0].operand.operands[1]
if x_0 is not None and x_1 is not None and x_0 == x_1 and c_0.value == c_1.value:
return Expr.BinaryOp(expr.idx, 'Mod', [x_0, c_0], expr.signed, **expr.tags)
if (operand_0, operand_1) != (expr.operands[0], expr.operands[1]):
return Expr.BinaryOp(expr.idx, 'Sub', [operand_0, operand_1], expr.signed, **expr.tags)
return expr
def _ail_handle_Mod(self, expr): #pylint: disable=no-self-use
return expr
class ModSimplifier(OptimizationPass):
ARCHES = ["X86", "AMD64"]
PLATFORMS = ["linux", "windows"]
STAGE = OptimizationPassStage.AFTER_GLOBAL_SIMPLIFICATION
def __init__(self, func, **kwargs):
super().__init__(func, **kwargs)
self.state = SimplifierAILState(self.project.arch)
self.engine = ModSimplifierAILEngine()
self.analyze()
def _check(self):
return True, None
def _analyze(self, cache=None):
for block in list(self._graph.nodes()):
new_block = block
old_block = None
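            # re-run the simplifier over the block until it reaches a fixed point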
while new_block != old_block:
old_block = new_block
new_block = self.engine.process(state=self.state.copy(), block=old_block.copy())
_l.debug("new block: %s", new_block.statements)
self._update_block(block, new_block)
AnalysesHub.register_default("ModSimplifier", ModSimplifier)
|
bsd-2-clause
| 3,081,194,925,430,433,000
| 35.923077
| 99
| 0.591667
| false
| 3.428571
| false
| false
| false
|
pjz/Zappa
|
test_settings.py
|
1
|
1325
|
APP_MODULE = 'tests.test_app'
APP_FUNCTION = 'hello_world'
DJANGO_SETTINGS = None
DEBUG = 'True'
LOG_LEVEL = 'DEBUG'
SCRIPT_NAME = 'hello_world'
DOMAIN = None
API_STAGE = 'ttt888'
PROJECT_NAME = 'ttt888'
REMOTE_ENV='s3://lmbda/test_env.json'
## test_env.json
#{
# "hello": "world"
#}
#
AWS_EVENT_MAPPING = {
'arn:aws:s3:1': 'test_settings.aws_s3_event',
'arn:aws:sns:1': 'test_settings.aws_sns_event',
'arn:aws:dynamodb:1': 'test_settings.aws_dynamodb_event',
'arn:aws:kinesis:1': 'test_settings.aws_kinesis_event',
'arn:aws:sqs:1': 'test_settings.aws_sqs_event'
}
ENVIRONMENT_VARIABLES={'testenv': 'envtest'}
AUTHORIZER_FUNCTION='test_settings.authorizer_event'
def prebuild_me():
print("This is a prebuild script!")
def callback(self):
print("this is a callback")
def aws_s3_event(event, content):
return "AWS S3 EVENT"
def aws_sns_event(event, content):
return "AWS SNS EVENT"
def aws_async_sns_event(arg1, arg2, arg3):
return "AWS ASYNC SNS EVENT"
def aws_dynamodb_event(event, content):
return "AWS DYNAMODB EVENT"
def aws_kinesis_event(event, content):
return "AWS KINESIS EVENT"
def aws_sqs_event(event, content):
return "AWS SQS EVENT"
def authorizer_event(event, content):
return "AUTHORIZER_EVENT"
def command():
print("command")
|
mit
| -1,091,853,218,482,797,700
| 18.485294
| 61
| 0.676981
| false
| 2.760417
| true
| false
| false
|
BayesianLogic/blog
|
tools/blog_py_lexer/blog/lexer.py
|
1
|
3373
|
from pygments.lexer import RegexLexer, bygroups, include
from pygments.token import *
class BlogLexer(RegexLexer):
name = 'BLOG'
aliases = ['blog']
filenames = ['*.blog', '*.dblog']
operators = ['\\-\\>', ':', '\\+', '\\-', '\\*', '/', '\\[', ']',
'\\{', '}', '!', '\\<', '\\>', '\\<=', '\\>=', '==', '!=',
'&', '\\|', '=\\>', '#', '\\^', '%', '@']
wordops = ['isEmptyString', 'succ', 'pred',
'prev', 'inv', 'det', 'min', 'max',
'round', 'transpose', 'sin', 'cos', 'tan',
'atan2', 'sum', 'vstack', 'eye', 'zeros',
'ones', 'toInt', 'toReal', 'diag', 'repmat',
               'hstack', 'pi', 'trace']
deliminators = [',', ';', '\\(', '\\)', '=', '~']
keywords = ['extern','import','fixed','distinct','random','origin',
'param','type', 'forall', 'exists', 'obs', 'query',
'if', 'then', 'else', 'for', 'case', 'in']
types = ['Integer','Real','Boolean','NaturalNum','List','Map',
'Timestep','RealMatrix','IntegerMatrix']
distribs = ['TabularCPD', 'Distribution','Gaussian',
'UniformChoice', 'MultivarGaussian', 'Poisson',
'Bernoulli', 'BooleanDistrib', 'Binomial', 'Beta', 'BoundedGenometric',
'Categorical', 'Dirichlet', 'EqualsCPD', 'Gamma', 'Geometric', 'Iota',
'LinearGaussian', 'MixtureDistrib', 'Multinomial',
'NegativeBinamial', 'RoundedLogNormal', 'TabularInterp',
'UniformVector', 'UnivarGaussian',
'Exponential', 'UniformInt', 'UniformReal']
idname_reg = '[a-zA-Z_]\\w*'
def gen_regex(ops):
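        """Join a list of regex-escaped tokens into a single alternation."""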
return "|".join(ops)
tokens = {
'root' : [
(r'//.*?\n', Comment.Single),
(r'(?s)/\*.*?\*/', Comment.Multiline),
('('+idname_reg+')(\\()', bygroups(Name.Function, Punctuation)),
('('+gen_regex(types)+')\\b', Keyword.Type),
('('+gen_regex(distribs)+')\\b', Name.Class),
('('+gen_regex(keywords)+')\\b', Keyword),
(gen_regex(operators), Operator),
('(' + gen_regex(wordops) +')\\b', Operator.Word),
('(true|false|null)\\b', Keyword.Constant),
('('+idname_reg+')\\b', Name),
(r'"(\\\\|\\"|[^"])*"', String),
(gen_regex(deliminators), Punctuation),
(r'\d*\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\s+', Text),
]
}
def run_tests():
tests = [
"type Person;",
"distinct Person Alice, Bob, P[100];",
"random Real x1_x2x3 ~ Gaussian(0, 1);\nrandom Real y ~ Gaussian(x, 1);",
"random type0 funcname(type1 x) =expression;\nrandom type0 funcname(type1 x) dependency-expression;",
"random NaturalNum x ~ Poisson(a);",
"param Real a: 0 < a & a < 10 ;"
"random Real funcname(type1 x);",
"1.0 + 2.0 * 3.0 - 4.0",
"Twice( 10.0 ) * 5.5",
"fixed NaturalNum[] c = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];",
"fixed NaturalNum[][] table = [1, 2, 3; 4, 5, 6];",
"fixed List<NaturalNum> a = List(1, 2, 3, 4, 5, 6);",
"fixed Map<Boolean, Real> map1 = {true -> 0.3, false -> 0.7};",
"Categorical<Boolean> cpd1 =Categorical({true -> 0.3, false -> 0.7});",
"List",
"/*abc */",
"""
/* Evidence for the Hidden Markov Model.
*/
"""
]
lexer = BlogLexer()
for test in tests:
print(test)
for token in (lexer.get_tokens(test)):
print(token)
if __name__ == '__main__':
run_tests()
|
bsd-3-clause
| 4,312,323,977,143,371,000
| 37.770115
| 105
| 0.501927
| false
| 3.140596
| true
| false
| false
|
haematologic/cellcounter
|
cellcounter/accounts/views.py
|
1
|
7496
|
from braces.views import LoginRequiredMixin
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.forms import SetPasswordForm
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.utils.http import urlsafe_base64_decode
from django.utils.safestring import mark_safe
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic import FormView, UpdateView, DetailView, DeleteView
from ratelimit.exceptions import Ratelimited
from ratelimit.mixins import RatelimitMixin
from ratelimit.utils import is_ratelimited
from .forms import EmailUserCreationForm, PasswordResetForm
class RateLimitedFormView(FormView):
ratelimit_key = 'ip'
ratelimit_block = True
ratelimit_rate = '1/h'
ratelimit_group = None
def dispatch(self, *args, **kwargs):
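        # Peek at the rate limit without incrementing it; the counter is only
        # bumped on a successful submission (see RegistrationView.form_valid).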
ratelimited = is_ratelimited(request=self.request,
group=self.ratelimit_group,
key=self.ratelimit_key,
rate=self.ratelimit_rate,
increment=False)
if ratelimited and self.ratelimit_block:
raise Ratelimited()
return super(RateLimitedFormView, self).dispatch(*args, **kwargs)
class RegistrationView(RateLimitedFormView):
template_name = 'accounts/register.html'
form_class = EmailUserCreationForm
ratelimit_group = 'registration'
def form_valid(self, form):
user = form.save()
messages.success(self.request,
mark_safe(
"Successfully registered, you are now logged in! <a href='%s'>View your profile</a>" %
reverse('user-detail', kwargs={'pk': user.id})))
user = authenticate(username=form.cleaned_data['username'],
password=form.cleaned_data['password1'])
login(self.request, user)
is_ratelimited(request=self.request, group=self.ratelimit_group, key=self.ratelimit_key,
rate=self.ratelimit_rate, increment=True)
return super(RegistrationView, self).form_valid(form)
def get_success_url(self):
return reverse('new_count')
class PasswordChangeView(LoginRequiredMixin, FormView):
template_name = 'accounts/password_change.html'
form_class = PasswordChangeForm
def get_form_kwargs(self):
kwargs = super(PasswordChangeView, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def form_valid(self, form):
form.save()
messages.success(self.request, "Password changed successfully")
return HttpResponseRedirect(reverse('new_count'))
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
context_object_name = 'user_detail'
template_name = 'accounts/user_detail.html'
def get_object(self, queryset=None):
if self.request.user.id == int(self.kwargs['pk']):
return super(UserDetailView, self).get_object()
else:
raise PermissionDenied
def get_context_data(self, **kwargs):
context = super(UserDetailView, self).get_context_data(**kwargs)
context['keyboards'] = self.object.keyboard_set.all().order_by('-is_primary')
return context
class UserDeleteView(LoginRequiredMixin, DeleteView):
model = User
context_object_name = 'user_object'
template_name = 'accounts/user_check_delete.html'
def get_object(self, queryset=None):
if self.request.user.id == int(self.kwargs['pk']):
return super(UserDeleteView, self).get_object()
else:
raise PermissionDenied
def get_success_url(self):
messages.success(self.request, "User account deleted")
return reverse('new_count')
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ['first_name', 'last_name', 'email', ]
template_name = 'accounts/user_update.html'
def get_object(self, queryset=None):
if self.request.user.id == int(self.kwargs['pk']):
return super(UserUpdateView, self).get_object()
else:
raise PermissionDenied
def get_success_url(self):
messages.success(self.request, "User details updated")
return reverse('user-detail', kwargs={'pk': self.kwargs['pk']})
class PasswordResetView(RatelimitMixin, FormView):
template_name = 'accounts/reset_form.html'
form_class = PasswordResetForm
ratelimit_rate = '5/h'
ratelimit_group = 'pwdreset'
ratelimit_key = 'ip'
ratelimit_block = True
def form_valid(self, form):
form.save(request=self.request)
messages.success(self.request, 'Reset email sent')
return super(PasswordResetView, self).form_valid(form)
def form_invalid(self, form):
"""Don't expose form errors to the user"""
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse('new_count')
class PasswordResetConfirmView(FormView):
template_name = 'accounts/reset_confirm.html'
form_class = SetPasswordForm
@method_decorator(sensitive_post_parameters())
def dispatch(self, request, *args, **kwargs):
return super(PasswordResetConfirmView, self).dispatch(request, *args, **kwargs)
@staticmethod
def valid_user(uidb64):
try:
uid = urlsafe_base64_decode(uidb64)
user = User.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
return None
return user
@staticmethod
def valid_token(user, token):
if user is not None:
return default_token_generator.check_token(user, token)
else:
return False
def _valid_inputs(self, uidb64, token):
self.user_object = self.valid_user(uidb64)
return self.valid_token(self.user_object, token)
def get(self, request, *args, **kwargs):
if self._valid_inputs(self.kwargs['uidb64'], self.kwargs['token']):
form = self.get_form(self.get_form_class())
return self.render_to_response(self.get_context_data(form=form, validlink=True))
else:
return self.render_to_response(self.get_context_data(validlink=False))
def post(self, request, *args, **kwargs):
if self._valid_inputs(self.kwargs['uidb64'], self.kwargs['token']):
return super(PasswordResetConfirmView, self).post(request, *args, **kwargs)
else:
return self.render_to_response(self.get_context_data(validlink=False))
def get_form_kwargs(self):
kwargs = super(PasswordResetConfirmView, self).get_form_kwargs()
kwargs['user'] = self.user_object
return kwargs
def form_valid(self, form):
form.save()
messages.success(self.request, 'Password reset successfully')
return HttpResponseRedirect(reverse('new_count'))
def rate_limited(request, exception):
messages.error(request, 'You have been rate limited')
return HttpResponseRedirect(reverse('new_count'))
|
mit
| -4,984,765,481,467,892,000
| 36.293532
| 115
| 0.665555
| false
| 4.082789
| false
| false
| false
|
rohitwaghchaure/digitales_erpnext
|
erpnext/accounts/report/budget_variance_report/budget_variance_report.py
|
1
|
4804
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import flt
from frappe.utils import formatdate
import time
from erpnext.accounts.utils import get_fiscal_year
from erpnext.controllers.trends import get_period_date_ranges, get_period_month_ranges
def execute(filters=None):
if not filters: filters = {}
columns = get_columns(filters)
period_month_ranges = get_period_month_ranges(filters["period"], filters["fiscal_year"])
cam_map = get_costcenter_account_month_map(filters)
data = []
for cost_center, cost_center_items in cam_map.items():
for account, monthwise_data in cost_center_items.items():
row = [cost_center, account]
totals = [0, 0, 0]
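			# running [target, actual, variance] totals across all periods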
for relevant_months in period_month_ranges:
period_data = [0, 0, 0]
for month in relevant_months:
month_data = monthwise_data.get(month, {})
for i, fieldname in enumerate(["target", "actual", "variance"]):
value = flt(month_data.get(fieldname))
period_data[i] += value
totals[i] += value
period_data[2] = period_data[0] - period_data[1]
row += period_data
totals[2] = totals[0] - totals[1]
row += totals
data.append(row)
return columns, sorted(data, key=lambda x: (x[0], x[1]))
def get_columns(filters):
for fieldname in ["fiscal_year", "period", "company"]:
if not filters.get(fieldname):
label = (" ".join(fieldname.split("_"))).title()
msgprint(_("Please specify") + ": " + label,
raise_exception=True)
columns = [_("Cost Center") + ":Link/Cost Center:120", _("Account") + ":Link/Account:120"]
group_months = False if filters["period"] == "Monthly" else True
for from_date, to_date in get_period_date_ranges(filters["period"], filters["fiscal_year"]):
for label in [_("Target") + " (%s)", _("Actual") + " (%s)", _("Variance") + " (%s)"]:
if group_months:
				label = label % (formatdate(from_date, format_string="MMM") + " - " + formatdate(to_date, format_string="MMM"))
else:
label = label % formatdate(from_date, format_string="MMM")
columns.append(label+":Float:120")
return columns + [_("Total Target") + ":Float:120", _("Total Actual") + ":Float:120",
_("Total Variance") + ":Float:120"]
#Get cost center & target details
def get_costcenter_target_details(filters):
return frappe.db.sql("""select cc.name, cc.distribution_id,
cc.parent_cost_center, bd.account, bd.budget_allocated
from `tabCost Center` cc, `tabBudget Detail` bd
where bd.parent=cc.name and bd.fiscal_year=%s and
cc.company=%s order by cc.name""" % ('%s', '%s'),
(filters.get("fiscal_year"), filters.get("company")), as_dict=1)
#Get target distribution details of accounts of cost center
def get_target_distribution_details(filters):
target_details = {}
for d in frappe.db.sql("""select bd.name, bdd.month, bdd.percentage_allocation
from `tabBudget Distribution Detail` bdd, `tabBudget Distribution` bd
where bdd.parent=bd.name and bd.fiscal_year=%s""", (filters["fiscal_year"]), as_dict=1):
target_details.setdefault(d.name, {}).setdefault(d.month, flt(d.percentage_allocation))
return target_details
#Get actual details from gl entry
def get_actual_details(filters):
ac_details = frappe.db.sql("""select gl.account, gl.debit, gl.credit,
gl.cost_center, MONTHNAME(gl.posting_date) as month_name
from `tabGL Entry` gl, `tabBudget Detail` bd
where gl.fiscal_year=%s and company=%s
and bd.account=gl.account and bd.parent=gl.cost_center""" % ('%s', '%s'),
(filters.get("fiscal_year"), filters.get("company")), as_dict=1)
cc_actual_details = {}
for d in ac_details:
cc_actual_details.setdefault(d.cost_center, {}).setdefault(d.account, []).append(d)
return cc_actual_details
def get_costcenter_account_month_map(filters):
import datetime
costcenter_target_details = get_costcenter_target_details(filters)
tdd = get_target_distribution_details(filters)
actual_details = get_actual_details(filters)
cam_map = {}
for ccd in costcenter_target_details:
for month_id in range(1, 13):
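			# 2013 is only a reference year used to render month names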
month = datetime.date(2013, month_id, 1).strftime('%B')
cam_map.setdefault(ccd.name, {}).setdefault(ccd.account, {})\
.setdefault(month, frappe._dict({
"target": 0.0, "actual": 0.0
}))
tav_dict = cam_map[ccd.name][ccd.account][month]
month_percentage = tdd.get(ccd.distribution_id, {}).get(month, 0) \
if ccd.distribution_id else 100.0/12
tav_dict.target = flt(ccd.budget_allocated) * month_percentage / 100
for ad in actual_details.get(ccd.name, {}).get(ccd.account, []):
if ad.month_name == month:
tav_dict.actual += flt(ad.debit) - flt(ad.credit)
return cam_map
|
agpl-3.0
| 4,049,678,227,760,201,700
| 36.826772
| 117
| 0.678601
| false
| 3.111399
| false
| false
| false
|
uclouvain/osis_louvain
|
assessments/forms/score_file.py
|
1
|
1880
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django import forms
from django.utils.translation import ugettext_lazy as _
class ScoreFileForm(forms.Form):
file = forms.FileField(error_messages={'required': _('no_file_submitted')})
def clean_file(self):
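        # accept only .xlsx uploads whose MIME type is the XLSX spreadsheet type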
file = self.cleaned_data['file']
content_type = file.content_type.split('/')[1]
valid_content_type = 'vnd.openxmlformats-officedocument.spreadsheetml.sheet' in content_type
if ".xlsx" not in file.name or not valid_content_type:
self.add_error('file', forms.ValidationError(_('file_must_be_xlsx'), code='invalid'))
return file
|
agpl-3.0
| -502,586,721,529,759,000
| 47.179487
| 100
| 0.654604
| false
| 4.075922
| false
| false
| false
|
hsharsha/perfrunner
|
perfrunner/tests/functional.py
|
1
|
1852
|
import unittest
from perfrunner.__main__ import get_options
from perfrunner.helpers.memcached import MemcachedHelper
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.helpers.rest import RestHelper
from perfrunner.settings import ClusterSpec, TestConfig
from perfrunner.tests import TargetIterator
class FunctionalTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
options, _args = get_options()
override = \
_args and (arg.split('.') for arg in ' '.join(_args).split(','))
self.cluster_spec = ClusterSpec()
self.cluster_spec.parse(options.cluster_spec_fname)
self.test_config = TestConfig()
self.test_config.parse(options.test_config_fname, override)
self.target_iterator = TargetIterator(self.cluster_spec,
self.test_config)
self.memcached = MemcachedHelper(self.test_config)
self.remote = RemoteHelper(self.cluster_spec, self.test_config)
self.rest = RestHelper(self.cluster_spec)
super(FunctionalTest, self).__init__(*args, **kwargs)
class MemcachedTests(FunctionalTest):
def test_num_threads(self):
expected_threads = self.test_config.cluster.num_cpus
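        # when num_cpus is not set in the test config, memcached is expected
        # to run with 75% of the detected cores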
if expected_threads is None:
cores = self.remote.detect_number_cores()
expected_threads = int(0.75 * cores)
for target in self.target_iterator:
host = target.node.split(':')[0]
port = self.rest.get_memcached_port(target.node)
stats = self.memcached.get_stats(host, port, target.bucket,
stats='')
num_threads = int(stats['threads'])
self.assertEqual(num_threads, expected_threads)
if __name__ == '__main__':
unittest.main(argv=['functional.py'])
|
apache-2.0
| 919,988,435,475,458,000
| 37.583333
| 76
| 0.636609
| false
| 4.124722
| true
| false
| false
|
mfasq1Monash/FIT3140
|
interpreter.py
|
1
|
6491
|
'''
Author: Michael Asquith, Aaron Gruneklee
Created: 2014.12.08
Last Modified: 2014.12.23
Interpreter for a simple functional programming language.
Access with interpret(command)
Based on Peter Norvig's Lispy interpreter, http://norvig.com/lispy.html
'''
import math, operator as op
from robotio import RobotIO
Symbol = str
class VariableAlreadyPresentException(Exception):
pass
class FunctionAlreadyDefinedException(Exception):
pass
class VariableAlreadySetException(Exception):
pass
class VariableNotFoundException(Exception):
pass
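# Lists produced by the interpreter are wrapped in InterpretedList so that
# evaluate() treats them as literal data rather than expressions to execute.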
class InterpretedList(list):
pass
class Procedure(object):
"""A user-defined method for the interpreter"""
def __init__(self, parms, stats, env, inter):
self.parameters = parms
self.statements = stats
self.environment = env
self.interpreter = inter
def __call__(self, *args):
localVariables = Environment(self.parameters, args, self.environment)
return self.interpreter.evaluate(self.statements, localVariables)
class Environment(dict):
"""A set of variables for the interpreter or a method within it."""
def __init__(self, parms=(), expressions=(), outer=None):
"""When evaluating, procedures will pass in their parameters"""
self.update(zip(parms, expressions))
self.outer = outer
def find(self, variable):
"""Returns the lowest level Environment which has variable"""
if variable in self:
return self
try:
return self.outer.find(variable)
except AttributeError:
raise VariableNotFoundException
def add_new(self, variable, value):
"""Adds a new definition to the environment. If the variable is already present, raises a KeyAlreadyPresentError"""
if variable in self:
raise(VariableAlreadyPresentException)
self[variable] = value
class Interpreter:
"""After initialising an interpreter, run expressions by calling interpret.
"""
def __init__(self, newRobotIO):
"""Creates an interpreter with standard math operations and variables.
Can send input/output to newRobotIO
"""
self.global_environment = self.standard_environment()
self.robotio = newRobotIO
def interpret(self, code):
"""Parses and executes code a string in the form of:
(method_name argument1 argument2)
Arguments which are expressions must be placed in brackets.
Arguments which are not expressions must not be placed in brackets.
"""
return self.evaluate(self.parse(code))
def parse(self, code):
"Read an expression from a string."
return self.read_from_tokens(self.tokenize(code))
def tokenize(self, s):
"Convert a string into a list of tokens."
return s.replace('(',' ( ').replace(')',' ) ').split()
def read_from_tokens(self, tokens):
"Read an expression from a sequence of tokens."
if len(tokens) == 0:
raise SyntaxError('unexpected EOF while reading')
token = tokens.pop(0)
if '(' == token:
L = []
while tokens[0] != ')':
L.append(self.read_from_tokens(tokens))
tokens.pop(0) # pop off ')'
return L
elif ')' == token:
raise SyntaxError('unexpected )')
else:
return self.atom(token)
def atom(self, token):
"Numbers become numbers, booleans become booleans, everything else become symbols."
try:
return int(token)
except ValueError:
if token.lower() == 'true':
return True
elif token.lower() == 'false':
return False
else:
return Symbol(token)
def standard_environment(self):
"Creates the base variable environment"
env = Environment()
env.update(vars(math))
env.update({
'+':op.add, '-':op.sub, '*':op.mul, '/':op.div,
'>':op.gt, '<':op.lt, '>=':op.ge, '<=':op.le, '=':op.eq,
'define':None, 'if':None, 'set':None, 'comment':None,
'%': lambda x,y: abs(x % y),
'and': lambda x,y: x and y,
'or': lambda x,y: x or y,
'not': lambda x: not x,
'move': lambda x: self.robotio.move(x),
'turn': lambda x: self.robotio.turn(x),
'detect-wall': lambda x: self.robotio.detect_wall(x),
'detect-goal': lambda x: self.robotio.detect_goal(x),
'[]': InterpretedList(),
'build': lambda x,y: InterpretedList([x] + y),
'head': lambda x: x[0],
'tail': lambda x: InterpretedList(x[1:])
})
return env
def evaluate(self, x, env=None):
if env == None:
env = self.global_environment
# If x is a list, must be evaluating a method
if isinstance(x, list):
if isinstance(x, InterpretedList):
return x
method = x.pop(0)
# Defines a function
if method == 'define':
try:
self.global_environment.add_new(x[0], Procedure(x[1], x[2], env, self))
except VariableAlreadyPresentException:
raise FunctionAlreadyDefinedException
# If statement. [Test, consequences, alternative]
elif method == 'if':
if self.evaluate(x[0]):
return self.evaluate(x[1])
return self.evaluate(x[2])
# Sets a variable
elif method == 'set':
try:
env.add_new(x[0], self.evaluate(x[1],env))
except VariableAlreadyPresentException:
raise VariableAlreadySetException
return
elif method == 'comment':
return
# Executes all other functions
else:
method = self.evaluate(method, self.global_environment)
args = [self.evaluate(variable, env) for variable in x]
return method(*args)
elif isinstance(x, Symbol):
return self.evaluate(env.find(x)[x])
else:
return x
|
mit
| 5,197,515,918,788,058,000
| 33.343915
| 123
| 0.558312
| false
| 4.412644
| false
| false
| false
|
scottrice/Ice
|
ice/tasks/engine.py
|
1
|
1118
|
# encoding: utf-8
import os
from pysteam import paths as steam_paths
from pysteam import shortcuts
from pysteam import steam as steam_module
from ice import backups
from ice import configuration
from ice import consoles
from ice import emulators
from ice import paths
from ice import settings
from ice.logs import logger
from ice.persistence.config_file_backing_store import ConfigFileBackingStore
class TaskEngine(object):
def __init__(self, steam):
self.steam = steam
logger.debug("Initializing Ice")
    # We want to ignore the anonymous context, because there's no reason to
    # sync ROMs for it since you can't log in as that user.
is_user_context = lambda context: context.user_id != 'anonymous'
self.users = filter(is_user_context, steam_module.local_user_contexts(self.steam))
def run(self, tasks, app_settings, dry_run=False):
if self.steam is None:
logger.error("Cannot run Ice because Steam doesn't appear to be installed")
return
logger.info("=========== Starting Ice ===========")
for task in tasks:
task(app_settings, self.users, dry_run=dry_run)
|
mit
| 4,875,944,689,318,500,000
| 29.216216
| 86
| 0.723614
| false
| 3.56051
| false
| false
| false
|
TheWitchers/Team
|
TestingArea/TESTZONE_methods.py
|
1
|
1111
|
__author__ = 'dvir'
import tkFileDialog
import sqlite3
conn = sqlite3.connect(tkFileDialog.askopenfilename())
c = conn.cursor()
# using example db
def ex_show_purch(price):
l = []
for row in c.execute("SELECT symbol FROM stocks WHERE price > " + str(price) + ""):
print row
l.append(row)
print l
return l
ex_show_purch(raw_input("Enter Price: "))
# for project db
def show_purch(name):
l = []
for row in c.execute("SELECT * FROM Purchaseses WHERE nickname = '" + name + "'"):
print row
l.append(row)
print l
return l
# note: cursor.execute() returns a cursor, which has no len(); fetch the rows
# first. String-built SQL like this is also open to injection; sqlite3's
# parameterized form, c.execute("... WHERE username = ?", (name,)), is safer.
def correct_user(id, pas):
    if len(c.execute("SELECT * FROM Users WHERE username = '" + id + "' AND password = '" + pas + "'").fetchall()) > 0:
        print "user exists"
    else:
        print "user does not exist"
def has_inf(col, tbl, uid, info):
    if len(c.execute(
            "SELECT " + col + " FROM " + tbl + " WHERE username = '" + uid + "' AND " + col + " = '" + info + "'").fetchall()) > 0:
        print col + " already exists"
    else:
        print col + " is OK"
|
gpl-2.0
| 6,712,637,197,793,229,000
| 24.25
| 181
| 0.531953
| false
| 3.549521
| false
| false
| false
|
MagicStack/asyncpg
|
asyncpg/transaction.py
|
1
|
8297
|
# Copyright (C) 2016-present the asyncpg authors and contributors
# <see AUTHORS file>
#
# This module is part of asyncpg and is released under
# the Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
import enum
from . import connresource
from . import exceptions as apg_errors
class TransactionState(enum.Enum):
NEW = 0
STARTED = 1
COMMITTED = 2
ROLLEDBACK = 3
FAILED = 4
ISOLATION_LEVELS = {'read_committed', 'serializable', 'repeatable_read'}
ISOLATION_LEVELS_BY_VALUE = {
'read committed': 'read_committed',
'serializable': 'serializable',
'repeatable read': 'repeatable_read',
}
class Transaction(connresource.ConnectionResource):
"""Represents a transaction or savepoint block.
Transactions are created by calling the
:meth:`Connection.transaction() <connection.Connection.transaction>`
function.
"""
__slots__ = ('_connection', '_isolation', '_readonly', '_deferrable',
'_state', '_nested', '_id', '_managed')
def __init__(self, connection, isolation, readonly, deferrable):
super().__init__(connection)
if isolation and isolation not in ISOLATION_LEVELS:
raise ValueError(
'isolation is expected to be either of {}, '
'got {!r}'.format(ISOLATION_LEVELS, isolation))
self._isolation = isolation
self._readonly = readonly
self._deferrable = deferrable
self._state = TransactionState.NEW
self._nested = False
self._id = None
self._managed = False
async def __aenter__(self):
if self._managed:
raise apg_errors.InterfaceError(
'cannot enter context: already in an `async with` block')
self._managed = True
await self.start()
async def __aexit__(self, extype, ex, tb):
try:
self._check_conn_validity('__aexit__')
except apg_errors.InterfaceError:
if extype is GeneratorExit:
# When a PoolAcquireContext is being exited, and there
# is an open transaction in an async generator that has
# not been iterated fully, there is a possibility that
# Pool.release() would race with this __aexit__(), since
# both would be in concurrent tasks. In such case we
# yield to Pool.release() to do the ROLLBACK for us.
# See https://github.com/MagicStack/asyncpg/issues/232
# for an example.
return
else:
raise
try:
if extype is not None:
await self.__rollback()
else:
await self.__commit()
finally:
self._managed = False
@connresource.guarded
async def start(self):
"""Enter the transaction or savepoint block."""
self.__check_state_base('start')
if self._state is TransactionState.STARTED:
raise apg_errors.InterfaceError(
'cannot start; the transaction is already started')
con = self._connection
if con._top_xact is None:
if con._protocol.is_in_transaction():
raise apg_errors.InterfaceError(
'cannot use Connection.transaction() in '
'a manually started transaction')
con._top_xact = self
else:
# Nested transaction block
if self._isolation:
top_xact_isolation = con._top_xact._isolation
if top_xact_isolation is None:
top_xact_isolation = ISOLATION_LEVELS_BY_VALUE[
await self._connection.fetchval(
'SHOW transaction_isolation;')]
if self._isolation != top_xact_isolation:
raise apg_errors.InterfaceError(
'nested transaction has a different isolation level: '
'current {!r} != outer {!r}'.format(
self._isolation, top_xact_isolation))
self._nested = True
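        # A top-level block issues BEGIN; a nested block becomes a savepoint.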
if self._nested:
self._id = con._get_unique_id('savepoint')
query = 'SAVEPOINT {};'.format(self._id)
else:
query = 'BEGIN'
if self._isolation == 'read_committed':
query += ' ISOLATION LEVEL READ COMMITTED'
elif self._isolation == 'repeatable_read':
query += ' ISOLATION LEVEL REPEATABLE READ'
elif self._isolation == 'serializable':
query += ' ISOLATION LEVEL SERIALIZABLE'
if self._readonly:
query += ' READ ONLY'
if self._deferrable:
query += ' DEFERRABLE'
query += ';'
try:
await self._connection.execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.STARTED
def __check_state_base(self, opname):
if self._state is TransactionState.COMMITTED:
raise apg_errors.InterfaceError(
'cannot {}; the transaction is already committed'.format(
opname))
if self._state is TransactionState.ROLLEDBACK:
raise apg_errors.InterfaceError(
'cannot {}; the transaction is already rolled back'.format(
opname))
if self._state is TransactionState.FAILED:
raise apg_errors.InterfaceError(
'cannot {}; the transaction is in error state'.format(
opname))
def __check_state(self, opname):
if self._state is not TransactionState.STARTED:
if self._state is TransactionState.NEW:
raise apg_errors.InterfaceError(
'cannot {}; the transaction is not yet started'.format(
opname))
self.__check_state_base(opname)
async def __commit(self):
self.__check_state('commit')
if self._connection._top_xact is self:
self._connection._top_xact = None
if self._nested:
query = 'RELEASE SAVEPOINT {};'.format(self._id)
else:
query = 'COMMIT;'
try:
await self._connection.execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.COMMITTED
async def __rollback(self):
self.__check_state('rollback')
if self._connection._top_xact is self:
self._connection._top_xact = None
if self._nested:
query = 'ROLLBACK TO {};'.format(self._id)
else:
query = 'ROLLBACK;'
try:
await self._connection.execute(query)
except BaseException:
self._state = TransactionState.FAILED
raise
else:
self._state = TransactionState.ROLLEDBACK
@connresource.guarded
async def commit(self):
"""Exit the transaction or savepoint block and commit changes."""
if self._managed:
raise apg_errors.InterfaceError(
'cannot manually commit from within an `async with` block')
await self.__commit()
@connresource.guarded
async def rollback(self):
"""Exit the transaction or savepoint block and rollback changes."""
if self._managed:
raise apg_errors.InterfaceError(
'cannot manually rollback from within an `async with` block')
await self.__rollback()
def __repr__(self):
attrs = []
attrs.append('state:{}'.format(self._state.name.lower()))
if self._isolation is not None:
attrs.append(self._isolation)
if self._readonly:
attrs.append('readonly')
if self._deferrable:
attrs.append('deferrable')
if self.__class__.__module__.startswith('asyncpg.'):
mod = 'asyncpg'
else:
mod = self.__class__.__module__
return '<{}.{} {} {:#x}>'.format(
mod, self.__class__.__name__, ' '.join(attrs), id(self))
|
apache-2.0
| -3,851,821,235,338,135,000
| 33.861345
| 78
| 0.556105
| false
| 4.578918
| false
| false
| false
|
turtledb/0install
|
zeroinstall/injector/qdom.py
|
1
|
3485
|
"""A quick DOM implementation.
Python's xml.dom is very slow. The xml.sax module is also slow (as it imports urllib2).
This is our light-weight version.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from xml.parsers import expat
import zeroinstall
from zeroinstall.injector import versions
_parsed_version = versions.parse_version(zeroinstall.version)
class Element(object):
"""An XML element.
@ivar uri: the element's namespace
@type uri: str
@ivar name: the element's localName
@type name: str
@ivar attrs: the element's attributes (key is in the form [namespace " "] localName)
@type attrs: {str: str}
@ivar childNodes: children
@type childNodes: [L{Element}]
@ivar content: the text content
@type content: str"""
__slots__ = ['uri', 'name', 'attrs', 'childNodes', 'content']
def __init__(self, uri, name, attrs):
"""@type uri: str
@type name: str
@type attrs: {str: str}"""
self.uri = uri
self.name = name
self.attrs = attrs.copy()
self.content = None
self.childNodes = []
def __str__(self):
"""@rtype: str"""
attrs = [n + '=' + self.attrs[n] for n in self.attrs]
start = '<{%s}%s %s' % (self.uri, self.name, ' '.join(attrs))
if self.childNodes:
return start + '>' + '\n'.join(map(str, self.childNodes)) + ('</%s>' % (self.name))
elif self.content:
return start + '>' + self.content + ('</%s>' % (self.name))
else:
return start + '/>'
def getAttribute(self, name):
"""@type name: str
@rtype: str"""
return self.attrs.get(name, None)
class QSAXhandler(object):
"""SAXHandler that builds a tree of L{Element}s"""
def __init__(self, filter_for_version = False):
"""@param filter_for_version: skip elements if their if-0install-version attribute doesn't match L{zeroinstall.version} (since 1.13).
		@type filter_for_version: bool"""
self.stack = []
if filter_for_version:
self.filter_range = lambda expr: versions.parse_version_expression(expr)(_parsed_version)
else:
self.filter_range = lambda x: True
def startElementNS(self, fullname, attrs):
"""@type fullname: str
@type attrs: {str: str}"""
split = fullname.split(' ', 1)
if len(split) == 2:
self.stack.append(Element(split[0], split[1], attrs))
else:
self.stack.append(Element(None, fullname, attrs))
self.contents = ''
def characters(self, data):
"""@type data: str"""
self.contents += data
def endElementNS(self, name):
"""@type name: str"""
contents = self.contents.strip()
self.stack[-1].content = contents
self.contents = ''
new = self.stack.pop()
if self.stack:
target_versions = new.attrs.get('if-0install-version')
if target_versions and not self.filter_range(target_versions):
return
self.stack[-1].childNodes.append(new)
else:
self.doc = new
def parse(source, filter_for_version = False):
"""Parse an XML stream into a tree of L{Element}s.
@param source: data to parse
@type source: file
@param filter_for_version: skip elements if their if-0install-version attribute doesn't match L{zeroinstall.version} (since 1.13).
@type filter_for_version: bool
@return: the root
@rtype: L{Element}"""
handler = QSAXhandler(filter_for_version)
parser = expat.ParserCreate(namespace_separator = ' ')
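	# the ' ' namespace separator lets startElementNS split '<uri> <localName>'
	# with a plain str.split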
parser.StartElementHandler = handler.startElementNS
parser.EndElementHandler = handler.endElementNS
parser.CharacterDataHandler = handler.characters
parser.ParseFile(source)
return handler.doc
|
lgpl-2.1
| -237,805,872,924,645,440
| 29.840708
| 135
| 0.682927
| false
| 3.119964
| false
| false
| false
|
bandit145/ans-between
|
src/dictops.py
|
1
|
1886
|
#TODO: allow missing params and args lists to pass tests
from src import logging
class dict_mgm:
#creates ansible command to run
def make_play(data,db_data,location):
if dict_mgm.data_check(data, db_data) == 'OK':
command = 'ansible-playbook {location}'.format(location=location)
			#build the command string from the optional params/args sections
logging.debug(data.keys())
command+=data['name']+' '
if 'params' in data.keys():
command+= dict_mgm.sort_params(data['params'])
if 'args' in data.keys():
command+= dict_mgm.sort_args(data['args'])
if 'password' in data.keys():
password = data['password']
else:
password = None
logging.debug(command)
logging.debug(password)
return command, password
else:
return 'Error', None
#check integrity of submitted data compared to its schema model
def data_check(data,db_data):
logging.debug(data)
logging.debug(db_data)
if len(data) != len(db_data):
logging.debug('triggered 1')
return 'Error'
if data.keys() != db_data.keys():
logging.debug('triggered 2')
return 'Error'
if len(data.values()) != len(db_data.values()):
logging.debug('triggered 3')
return 'Error'
#for playbooks that have no params/args
try:
if len(data['params']) != len(db_data['params']):
logging.debug('triggered 4')
return 'Error'
except KeyError:
pass
try:
if len(data['args']) != len(db_data['args']):
logging.debug('triggered 5')
return 'Error'
except KeyError:
pass
logging.debug('OK')
return 'OK'
def sort_params(params):#deals with param dics
command = ''
for item in params:
keys= list(item.keys())
values= list(item.values())
logging.debug(keys)
logging.debug(values)
command+=keys[0]+' '+values[0]+' '
return command
def sort_args(args): #deals with args list
command = ''
for arg in args:
command+= arg+' '
return command
|
mit
| -4,789,335,474,906,966,000
| 25.208333
| 68
| 0.656416
| false
| 3.107084
| false
| false
| false
|
phense/check_duplicate_files
|
check_duplicate_files.py
|
1
|
21660
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""check_duplicate_files.py
Finds all duplicate files in given directories using a hash-algorithm.
After scanning the filesystem for possible duplicate files (all files with a
unique filesize are dismissed, except for images when the perceptual hash
algorithm is selected), all remaining candidate files are hashed. With this
pre-filtering, this module is extremely fast on large file-sets, since only a
handful of files need to actually be hashed.
Standard use: python3 check_duplicate_files -i /some/folder ./out.txt
"""
# FEATURE(zyrkon): ignore/include win/linux/mac hidden file
# FEATURE(zyrkon): implement multiprocessor for hashing
# FEATURE(zyrkon): find broken symbolic links
# FEATURE(zyrkon): find empty files and directories
# FEATURE(zyrkon): --size 20M-1G to find files between 20mb and 1gb (example)
# FEATURE(zyrkon): maybe a GUI
__author__ = 'Peter Hense (peter.hense@gmail.com)'
__copyright__ = 'Copyright (c) 2015, Peter Hense'
__license__ = 'Apache License Version 2.0'
__credits__ = '' # ['List', 'of', 'programmers']
__status__ = 'Development' # Prototype / Development / Production
__version__ = '0.8'
import os
import sys
if sys.version_info < (3, 0):
sys.stdout.write("Sorry, requires Python 3.x, not Python 2.x\n")
sys.exit(1)
import codecs
import datetime
import hashlib
import json
import operator
import signal
from argparse import ArgumentParser
from argparse import ArgumentTypeError
from collections import defaultdict
from tqdm import *
from stat import *
try:
from PIL import Image # Pillow (modern PIL fork)
except ImportError:
IMG_LIB_ERROR = True
else:
IMG_LIB_ERROR = False
FILEREADERROR = 255
def generate_hashes(filelist, image_list, hashtype, pHash):
""" Main-Module for handling all File-Hashing and saving the hash-results
Args:
filelist: List of file-paths to REGULAR FILES to run a normal hash-algorithm on
image_list: List of file-paths of images to run a perceptual hash-algorithm on
hashtype: hash-algorithm to use for normal files (default=md5)
pHash: boolean switch to activate perceptual image-hashing
Returns:
d_list_hash: dictionary with lists of files sorted by hash-value (key)
errorlist: list of files that could not be accessed / read
"""
d_list_hash = defaultdict(list)
errorlist = []
for file_path in tqdm(filelist, 'hashing', None, True):
hash = _hash(file_path, hashtype)
if hash != FILEREADERROR:
d_list_hash[hash].append(file_path)
else:
errorlist.append(file_path)
if pHash: # perceptual image hashing
d_list_hash_img = defaultdict(list)
for file_path in tqdm(image_list, 'hashing images:', None, True):
hash = _perceptive_hash(file_path)
if hash != FILEREADERROR:
d_list_hash_img[hash].append(file_path)
else:
errorlist.append(file_path)
# calculate hamming-distance between all image-hashes to find
# outliners (hamming distance of two perceptual hashes < 4 means the images
# are basically the same)
index_list = [key for key in d_list_hash_img]
deleted_index_keys = []
for hash1 in tqdm(index_list, 'calculating', None, True):
if hash1 in deleted_index_keys:
continue
for hash2 in index_list:
if hash1 == hash2:
continue # same entry in list
if hash2 in deleted_index_keys:
continue
if _hamming_distance(hash1, hash2) < 4:
d_list_hash_img[hash1] += d_list_hash_img[hash2]
del d_list_hash_img[hash2]
deleted_index_keys.append(hash2)
# Filter out all unique entries from our resultset
_delete_unique_entries(d_list_hash)
if pHash:
_delete_unique_entries(d_list_hash_img)
d_list_hash.update(d_list_hash_img)
return d_list_hash, errorlist
def _perceptive_hash(file_path, hash_size = 8):
"""Calculates a hash-value from an image
Conversion uses a resized, grayscaled pixel-array of the image, converting
the pixel-array to a number-array (differences between neighboring pixels)
and finally converting these values to a hex-string of length hash_size
Args:
file_path: Path to an Image File
hash_size: Size of the generated hash string
Returns:
hash_string: generated hash string
"""
    # if memory consumption is too high for many images, it is possible to use
# with open (file_path, 'rb') as f:
# image = Image.open(f)
# ...
# del image
try:
image = Image.open(file_path)
except:
return FILEREADERROR
# Grayscale and shrink the image in one step
image = image.convert('L').resize((hash_size + 1, hash_size), Image.ANTIALIAS)
pixels = list(image.getdata())
    # Compare adjacent pixels
difference = []
for row in range(hash_size):
for col in range(hash_size):
pixel_left = image.getpixel((col, row))
pixel_right = image.getpixel((col +1, row))
difference.append(pixel_left > pixel_right)
# Convert binary array to hexadecimal string
decimal_value = 0
hex_string = []
for index, value in enumerate(difference):
if value:
decimal_value += 2**(index % 8)
if (index % 8) == 7:
hex_string.append(hex(decimal_value)[2:].rjust(2, '0'))
decimal_value = 0
return ''.join(hex_string)
def _hash(file_path, hashtype):
"""Uses a specified standard hash-algorithm to hash a regular file
Args:
file_path: file_path to a regular file that can be hashed
hashtype: version of hash-algorithm, default = md5
Returns:
hash: hash-string of the hashed file
Raises:
Returns global const FILEREADERROR on IOError
"""
try:
with open(file_path, 'rb') as f:
contents = f.read()
except:
return FILEREADERROR
hasher = getattr(hashlib, hashtype.lower(), hashlib.md5)
return hasher(contents).hexdigest()
def _hamming_distance(string1, string2):
""" Calculates the Hamming Distance of two strings, fast version
Args:
string1, string2: two strings of the same length
Returns:
Integer describing the Hamming Distance of the input strings
"""
assert len(string1) == len(string2)
ne = operator.ne # faster than '!=' and 'str.__ne__'
return sum(map(ne, string1, string2))
def scan_directories(directories, pHash):
""" creates file-lists from given directories
Recursively walks the given directories and their subdirectories, checking
all including files and their file-sizes. These are saved inside a dictionary
and pre-filtered by filesize. Optional separate handling of image-files.
Args:
directories: List of directories to crawl
pHash: boolean switch to active separate handling of image-files
Returns:
prefiltered_files: List of files with their file-paths
images: List of image-files if pHash is set, else an empty list
errorlist: List of files that could not be accessed
"""
extensions = ('.jpg', '.jpeg', '.png', '.bmp')
d_list_filesize = defaultdict(list)
images = []
errorlist = []
count = 0
print('Scanning directories...')
# code could be a lot smaller with `if pHash` inside the innermost loop
    # it would also lead to a LOT of unnecessary checking
if not pHash: # use normal hash on all files
for root_dir in directories:
for path, subdirList, fileList in os.walk(root_dir):
for fname in fileList:
qualified_filename = os.path.join(path, fname)
try: # denied permission for os.stat
st = os.stat(qualified_filename)
if S_ISREG(st.st_mode):
d_list_filesize[st.st_size].append(qualified_filename)
count += 1
except:
errorlist.append(qualified_filename)
count += 1
else: # split list of normal- and image-files
for root_dir in directories:
for path, subdirList, fileList in os.walk(root_dir):
for fname in fileList:
qualified_filename = os.path.join(path, fname)
if fname.endswith(extensions):
images.append(qualified_filename)
count += 1
else:
try:
st = os.stat(qualified_filename)
if S_ISREG(st.st_mode):
d_list_filesize[st.st_size].append(qualified_filename)
count += 1
except:
errorlist.append(qualified_filename)
count += 1
# Statistic
print('\nFiles found: %s' % count)
# pre-filter all files with unique filesize
# this is where we need the dictionary
_delete_unique_entries(d_list_filesize)
# put all filtered files in a list for easier handling
prefiltered_files = [path for paths in d_list_filesize.values() for path in paths]
# Statistic
print('Possible candidates: %s\n' % (prefiltered_files.__len__() + images.__len__()))
return prefiltered_files, images, errorlist
def _delete_unique_entries(dictionary):
""" removes all Lists from a dictionary that contain a single element
Args:
dictionary a dictionary of type defaultdict(set) or defaultdict(list)
"""
mark_for_delete = []
for key in dictionary:
if dictionary[key].__len__() == 1:
mark_for_delete.append(key)
for i in mark_for_delete:
del dictionary[i]
return
def write_output_text(d_list_hash, errorlist, outfile):
""" Writes result of this module in textform to a file
Args:
d_list_hash: found duplicates in form of a dictionary (key = hash-value)
outfile: the path and filename to write the output into (needs write-permission)
errorlist: list of files that could not be accessed
"""
write_errorlist = []
try:
with codecs.open(outfile, 'w', encoding='utf-8') as f:
f.write('\nThe Following File-Duplicates where found:')
f.write('\n==========================================\n')
for key in d_list_hash:
f.write('Hash: %s\n' %key)
for file_path in d_list_hash[key]:
try:
f.write('%s \n' % os.path.normcase(file_path))
except:
write_errorlist.append(file_path)
f.write('-------------------\n')
if errorlist.__len__() > 0:
f.write('\nThe Following Files could not be accessed:')
f.write('\n==========================================\n')
for error in errorlist:
try:
f.write('%s\n' % os.path.normcase(error))
except:
write_errorlist.append(error)
f.flush()
except: #IOError, UnicodeEncodeError
print('\n- Error - Could not open Output File.\n')
if write_errorlist.__len__() > 0:
print('- Error - These files could not be written to output file:\n')
for write_error in write_errorlist:
print('%s\n' % os.path.normcase(write_error))
print('(Please check your filesystem encoding)\n')
return
def write_output_bash(d_list_hash, outfile, create_link):
""" Writes result of this module as a bash script to a file
Args:
d_list_hash: found duplicates in form of a dictionary (key = hash-value)
outfile: the path and filename to write the output into (needs write-permission)
create_link: boolean switch to select, if a deleted file should be
replaced by a hardlink
"""
write_errorlist = []
try:
with codecs.open(outfile, 'w', encoding='utf-8') as f:
f.write('#!/bin/bash\n\n')
f.write('# This script is machine generated and might do harm to your\n')
f.write('# running system.\n')
f.write('# Please check this script carefully before running\n')
if create_link:
f.write('printf "replacing duplicates with hardlinks..."\n')
else:
f.write('printf "deleting duplicates..."\n')
for key in d_list_hash:
try:
original = os.path.normcase(d_list_hash[key][0])
f.write('# ------------------\n')
f.write('# Original: %s\n' % original)
for copy in d_list_hash[key][1:]:
f.write('rm %s\n' % copy)
if create_link:
f.write('ln %s %s\n' % (original, os.path.normcase(copy)))
except:
write_errorlist.append(file_path)
f.flush()
except: #IOError, UnicodeEncodeError
print('\n- Error - Could not open Output File.\n')
if write_errorlist.__len__() > 0:
print('- Error - These files could not be written to output file:\n')
for write_error in write_errorlist:
print('%s\n' % write_error)
print('(Please check your filesystem encoding)\n')
return
def write_output_win(d_list_hash, outfile, create_link):
""" Writes result of this module as a batch script to a file
Args:
d_list_hash: found duplicates in form of a dictionary (key = hash-value)
outfile: the path and filename to write the output into (needs write-permission)
create_link: boolean switch to select, if a deleted file should be
replaced by a hardlink
"""
write_errorlist = []
try:
with codecs.open(outfile, 'w', encoding='utf-8') as f:
f.write('@ECHO OFF\n\n')
f.write('REM This script is machine generated and might do harm to your\n')
f.write('REM running system.\n')
f.write('REM Please check this script carefully before running\n')
if create_link:
f.write('ECHO "replacing duplicates with hardlinks..."\n')
else:
f.write('ECHO "deleting duplicates..."\n')
for key in d_list_hash:
try:
original = os.path.normcase(d_list_hash[key][0])
f.write('REM ------------------\n')
f.write('REM Original: %s\n' % original)
for copy in d_list_hash[key][1:]:
f.write('DEL %s\n' % copy)
if create_link:
f.write('mklink /H %s %s\n' % (os.path.normcase(copy), original))
except:
write_errorlist.append(file_path)
f.flush()
except: #IOError, UnicodeEncodeError
print('\n- Error - Could not open Output File.\n')
if write_errorlist.__len__() > 0:
print('- Error - These files could not be written to output file:\n')
for write_error in write_errorlist:
print('%s\n' % write_error)
print('(Please check your filesystem encoding)\n')
return
def write_output_json(d_list_hash, outfile):
""" Writes result of this module as JSON to a file
Args:
d_list_hash: found duplicates in form of a dictionary (key = hash-value)
outfile: the path and filename to write the output into (needs write-permission)
"""
try:
with codecs.open(outfile, 'w', encoding='utf-8') as f:
json.dump(d_list_hash, f, ensure_ascii=False, indent=4)
except:
print('\n- Error - Could not write JSON Data to file')
return
def _query_yes_no(question, default="yes"):
"""User Console Interaction for Y/N Questions.
Args:
question: String containing a Question that needs User input
default: select the default answer of the question
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
def _signal_handler(signal, frame):
sys.exit('Aborting...')
def _readable_dir(prospective_dir):
""" Checks if a given string is a valid path on the file-system
Args:
prospective_dir: file-path as String
Returns:
prospective_dir if checks are passed
Raises:
ArgumentTypeError if checks fail
"""
if not os.path.isdir(prospective_dir):
raise ArgumentTypeError('readable_dir:{0} is not a valid path'.format(prospective_dir))
if os.access(prospective_dir, os.R_OK):
return prospective_dir
else:
raise ArgumentTypeError('readable_dir:{0} is not a readable dir'.format(prospective_dir))
def main():
signal.signal(signal.SIGINT, _signal_handler)
signal.signal(signal.SIGTERM, _signal_handler)
start_time = datetime.datetime.now()
parser = ArgumentParser(description = 'Check Duplicate Files')
parser.add_argument('-i', action = 'append', dest = 'dir',
type = _readable_dir,
help = 'add directory to list for duplicate search'
)
parser.add_argument('--hash', action = 'store', dest = 'hashtype',
default = 'md5',
help = 'select hash-type (md5 (default), sha1, sha224, sha256, sha384, sha512)'
)
parser.add_argument('-p', '--perceptual-hashing', action = 'store_true',
dest = 'pHash', default = False,
help = 'enables perceptual hashing of images'
)
parser.add_argument('-o', '--output-format', action = 'store', dest = 'outformat',
default = 'text',
help = 'select output format (text, json, bash_rm, bash_link, win_del, win_link)'
)
parser.add_argument('outfile', #nargs='?',
help = 'output file for found duplicates'
)
parser.add_argument('--version', action='version',
version='%(prog)s {version}'.format(version=__version__))
args = parser.parse_args()
    # use perceptual hashing only when the imaging library loaded successfully
    # and the user enabled it; otherwise normal hashes are used on all files
pHash = ((not IMG_LIB_ERROR) and args.pHash)
if not pHash:
print('(Perceptual Image Scan disabled)')
# Scan all directories and find duplicates by filesize
prefiltered_filelist, images, read_errors = scan_directories(args.dir, pHash)
# Ask the user if he wants to continue, now that he knows how
# many files need to be hashed. Exclude the query-time from
# execution time
time_query = datetime.datetime.now()
if not _query_yes_no('Do you want to continue?', 'yes'):
sys.exit(0)
timedelta_query = datetime.datetime.now() - time_query # timedelta
    # generate the hashes and calculate the execution time
# append possible new read-errors to the general error-list
d_list_hash = defaultdict(list)
d_list_hash, read_errors2 = generate_hashes(prefiltered_filelist, images, args.hashtype, pHash)
read_errors += read_errors2
execution_time = datetime.datetime.now() - start_time # timedelta
execution_time -= timedelta_query # timedelta
# write output
output = ['text', 'json', 'bash_rm', 'bash_link', 'win_del', 'win_link']
if args.outformat in output:
if args.outformat == 'text':
write_output_text(d_list_hash, read_errors, args.outfile)
elif args.outformat == 'json':
write_output_json(d_list_hash, args.outfile)
elif args.outformat == 'bash_rm':
write_output_bash(d_list_hash, args.outfile, False)
elif args.outformat == 'bash_link':
write_output_bash(d_list_hash, args.outfile, True)
elif args.outformat == 'win_del':
write_output_win(d_list_hash, args.outfile, False)
elif args.outformat == 'win_link':
write_output_win(d_list_hash, args.outfile, True)
else:
write_output_text(d_list_hash, read_errors, args.outfile)
    # zero-pad microseconds so the fractional part prints correctly
    print('\nExecution Time: %s.%06d seconds' % (execution_time.seconds,
                                                 execution_time.microseconds))
# done
sys.exit(0)
if __name__ == '__main__':
main()
|
apache-2.0
| -369,360,307,060,061,250
| 34.862583
| 105
| 0.580979
| false
| 4.094518
| false
| false
| false
|
petrpulc/git-cmp
|
checkers/references.py
|
1
|
1113
|
"""
Reference level checker (existence of given references or all refs/heads and refs/tags).
"""
from common import Common
from utils import check_diff
def __filter(reference_list):
return set(reference for reference in
reference_list if reference.split('/')[1] in ('heads', 'tags'))
def check():
"""
Run the checker on references.
"""
print("=== References")
if Common.args.references is None:
o_refs = __filter(Common.original.listall_references())
n_refs = __filter(Common.new.listall_references())
check_diff(o_refs, n_refs, "References", 2)
else:
o_refs = set()
for reference in Common.args.references:
if reference not in Common.original.listall_references():
print(" {} does not exist, please report".format(reference))
exit(1)
if reference not in Common.new.listall_references():
print(" {} expected, but not found".format(reference))
exit(1)
o_refs.add(reference)
print(" OK")
Common.references = o_refs
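# Worked example (illustration only) of the `__filter` helper above: given a
# mixed reference list, only heads and tags survive.
#   __filter(['refs/heads/master', 'refs/tags/v1.0', 'refs/remotes/origin/x'])
#   -> {'refs/heads/master', 'refs/tags/v1.0'}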
|
mit
| 2,314,850,003,653,533,000
| 30.8
| 88
| 0.602875
| false
| 4.137546
| false
| false
| false
|
mikegagnon/sidenote
|
prefix-links.py
|
1
|
1725
|
#!/usr/bin/env python
#
# This is free and unencumbered software released into the public domain.
#
# Sometimes you want to include one sidenote document into another.
# One way you could do that is copy the .md files from one project into another.
# However, this creates a risk of link-tag collisions. I.e. one project
# defines ~foo and the other project also defines ~foo.
#
# prefix-links.py solves this problem. It takes a .md file as input, then
# prefixes each link tag with a random string. Therefore ~foo becomes
# ~4C5FGAL2foo
#
# Then you can safely include .md files from multiple projects into another
# project
#
from sidenote import *
import argparse
import random
import re
import string
# https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
key = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
def obscure(filename):
with open(filename) as f:
lines = f.readlines()
for line in lines:
newline = ""
# tokenize the line into links and non-links
for part in LINK_PARSER.split(line):
if LINK_PARSER.match(part):
newpart = part.replace("(##", "(##" + key)
newline += newpart
else:
newline += part
if TILDE_ANCHOR_PARSER.match(newline):
newline = newline.replace("~", "~" + key)
print newline,
if __name__=="__main__":
parser = argparse.ArgumentParser(description='"Obscure" links in a Sidenote document')
parser.add_argument('file', type=str,
help='the markdown file to obscure')
args = parser.parse_args()
obscure(args.file)
|
unlicense
| 1,335,531,607,694,736,400
| 30.363636
| 115
| 0.662029
| false
| 3.799559
| false
| false
| false
|
slipstream/SlipStreamClient
|
client/src/main/python/slipstream/Logger.py
|
1
|
1610
|
import os
import errno
import logging
from logging.handlers import RotatingFileHandler
class Logger(object):
LOGGER_NAME = 'SSClient'
LOGFILE_MAXBYTES = 2*1024*1024
LOGFILE_BACKUPCOUNT = 5
LOGFILE_FORMAT = "%(asctime)s:%(levelname)s:%(name)s:%(message)s"
log_file = '/var/log/slipstream/client/slipstream-node.log'
def __init__(self, config_holder):
self.log_to_file = True
self.log_level = 'info'
self.logger_name = ''
config_holder.assign(self)
self.logger = None
self._configure_logger()
def _configure_logger(self):
self.logger = logging.getLogger(self.logger_name or Logger.LOGGER_NAME)
numeric_level = getattr(logging, self.log_level.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % self.log_level)
self.logger.setLevel(numeric_level)
formatter = logging.Formatter(self.LOGFILE_FORMAT)
if self.log_to_file:
self._create_log_dir()
handler = RotatingFileHandler(self.log_file,
maxBytes=self.LOGFILE_MAXBYTES,
backupCount=self.LOGFILE_BACKUPCOUNT)
else:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
self.logger.addHandler(handler)
def _create_log_dir(self):
log_dir = os.path.dirname(self.log_file)
try:
os.makedirs(log_dir)
except OSError, ex:
if ex.errno != errno.EEXIST:
raise
def get_logger(self):
return self.logger
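# Hedged usage sketch (`config_holder` is hypothetical here: any object whose
# `assign()` populates the attributes set in `__init__`):
#   logger = Logger(config_holder).get_logger()
#   logger.info('node started')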
|
apache-2.0
| -4,789,312,884,300,418,000
| 30.568627
| 79
| 0.618012
| false
| 3.851675
| false
| false
| false
|
psychopy/psychopy
|
psychopy/sound/microphone.py
|
1
|
35191
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Audio recording using a microphone.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = ['Microphone']
import sys
import psychopy.logging as logging
from psychopy.constants import NOT_STARTED, STARTED
from psychopy.preferences import prefs
from .audioclip import *
from .audiodevice import *
from .exceptions import *
import numpy as np
_hasPTB = True
try:
import psychtoolbox.audio as audio
except (ImportError, ModuleNotFoundError):
logging.warning(
"The 'psychtoolbox' library cannot be loaded but is required for audio "
"capture (use `pip install psychtoolbox` to get it). Microphone "
"recording will be unavailable this session. Note that opening a "
"microphone stream will raise an error.")
_hasPTB = False
class RecordingBuffer(object):
"""Class for a storing a recording from a stream.
Think of instances of this class behaving like an audio tape whereas the
`Microphone` class is the tape recorder. Samples taken from the stream are
written to the tape which stores the data.
Used internally by the `Microphone` class, users usually do not create
instances of this class themselves.
Parameters
----------
sampleRateHz : int
Sampling rate for audio recording in Hertz (Hz). By default, 48kHz
        (``sampleRateHz=48000``) is used which is adequate for most consumer
grade microphones (headsets and built-in).
channels : int
Number of channels to record samples to `1=Mono` and `2=Stereo`.
maxRecordingSize : int
Maximum recording size in kilobytes (Kb). Since audio recordings tend to
consume a large amount of system memory, one might want to limit the
size of the recording buffer to ensure that the application does not run
out of memory. By default, the recording buffer is set to 24000 KB (or
24 MB). At a sample rate of 48kHz, this will result in 62.5 seconds of
continuous audio being recorded before the buffer is full.
policyWhenFull : str
What to do when the recording buffer is full and cannot accept any more
samples. If 'ignore', samples will be silently dropped and the `isFull`
property will be set to `True`. If 'warn', a warning will be logged and
the `isFull` flag will be set. Finally, if 'error' the application will
raise an exception.
"""
def __init__(self, sampleRateHz=SAMPLE_RATE_48kHz, channels=2,
maxRecordingSize=24000, policyWhenFull='ignore'):
self._channels = channels
self._sampleRateHz = sampleRateHz
self._maxRecordingSize = maxRecordingSize
self._samples = None # `ndarray` created in _allocRecBuffer`
self._offset = 0 # recording offset
self._lastSample = 0 # offset of the last sample from stream
self._spaceRemaining = None # set in `_allocRecBuffer`
self._totalSamples = None # set in `_allocRecBuffer`
# check if the value is valid
if policyWhenFull not in ['ignore', 'warn', 'error']:
raise ValueError("Invalid value for `policyWhenFull`.")
self._policyWhenFull = policyWhenFull
self._warnedRecBufferFull = False
self._loops = 0
self._allocRecBuffer()
def _allocRecBuffer(self):
"""Allocate the recording buffer. Called internally if properties are
changed."""
# allocate another array
nBytes = self._maxRecordingSize * 1000
recArraySize = int((nBytes / self._channels) / (np.float32()).itemsize)
self._samples = np.zeros(
(recArraySize, self._channels), dtype=np.float32, order='C')
# sanity check
assert self._samples.nbytes == nBytes
self._totalSamples = len(self._samples)
self._spaceRemaining = self._totalSamples
@property
def samples(self):
"""Reference to the actual sample buffer (`ndarray`)."""
return self._samples
@property
def bufferSecs(self):
"""Capacity of the recording buffer in seconds (`float`)."""
return self._totalSamples / self._sampleRateHz
@property
def nbytes(self):
"""Number of bytes the recording buffer occupies in memory (`int`)."""
return self._samples.nbytes
@property
def sampleBytes(self):
"""Number of bytes per sample (`int`)."""
return np.float32().itemsize
@property
def spaceRemaining(self):
"""The space remaining in the recording buffer (`int`). Indicates the
number of samples that the buffer can still add before overflowing.
"""
return self._spaceRemaining
@property
def isFull(self):
"""Is the recording buffer full (`bool`)."""
return self._spaceRemaining <= 0
@property
def totalSamples(self):
"""Total number samples the recording buffer can hold (`int`)."""
return self._totalSamples
@property
def writeOffset(self):
"""Index in the sample buffer where new samples will be written when
`write()` is called (`int`).
"""
return self._offset
@property
def lastSample(self):
"""Index of the last sample recorded (`int`). This can be used to slice
the recording buffer, only getting data from the beginning to place
where the last sample was written to.
"""
return self._lastSample
@property
def loopCount(self):
"""Number of times the recording buffer restarted (`int`). Only valid if
`loopback` is ``True``."""
return self._loops
@property
def maxRecordingSize(self):
"""Maximum recording size in kilobytes (`int`).
Since audio recordings tend to consume a large amount of system memory,
one might want to limit the size of the recording buffer to ensure that
the application does not run out of memory. By default, the recording
buffer is set to 24000 KB (or 24 MB). At a sample rate of 48kHz, this
will result in 62.5 seconds of continuous audio being recorded before
the buffer is full.
Setting this value will allocate another recording buffer of appropriate
size. Avoid doing this in any time sensitive parts of your application.
"""
return self._maxRecordingSize
@maxRecordingSize.setter
def maxRecordingSize(self, value):
value = int(value)
# don't do this unless the value changed
if value == self._maxRecordingSize:
return
# if different than last value, update the recording buffer
self._maxRecordingSize = value
self._allocRecBuffer()
def seek(self, offset, absolute=False):
"""Set the write offset.
Use this to specify where to begin writing samples the next time `write`
is called. You should call `seek(0)` when starting a new recording.
Parameters
----------
offset : int
Position in the sample buffer to set.
absolute : bool
Use absolute positioning. Use relative positioning if `False` where
the value of `offset` will be added to the current offset. Default
is `False`.
"""
if not absolute:
self._offset += offset
else:
            self._offset = offset  # absolute positioning: use the given value
assert 0 <= self._offset < self._totalSamples
self._spaceRemaining = self._totalSamples - self._offset
def write(self, samples):
"""Write samples to the recording buffer.
Parameters
----------
samples : ArrayLike
Samples to write to the recording buffer, usually of a stream. Must
have the same number of dimensions as the internal array.
Returns
-------
int
Number of samples overflowed. If this is zero then all samples have
been recorded, if not, the number of samples rejected is given.
"""
nSamples = len(samples)
if self.isFull:
if self._policyWhenFull == 'ignore':
return nSamples # samples lost
elif self._policyWhenFull == 'warn':
if not self._warnedRecBufferFull:
logging.warning(
f"Audio recording buffer filled! This means that no "
f"samples are saved beyond {round(self.bufferSecs, 6)} "
f"seconds. Specify a larger recording buffer next time "
f"to avoid data loss.")
logging.flush()
self._warnedRecBufferFull = True
return nSamples
elif self._policyWhenFull == 'error':
raise AudioRecordingBufferFullError(
"Cannot write samples, recording buffer is full.")
else:
return nSamples # whatever
        if not nSamples:  # no samples came out of the stream, just return
            return 0

        if self._spaceRemaining >= nSamples:
            self._lastSample = self._offset + nSamples
            audioData = samples[:, :]
        else:
            self._lastSample = self._offset + self._spaceRemaining
            audioData = samples[:self._spaceRemaining, :]

        self._samples[self._offset:self._lastSample, :] = audioData
        nWritten = len(audioData)  # may be fewer than `nSamples` if truncated
        self._offset += nWritten
        self._spaceRemaining -= nWritten

        # If the buffer is now full, the next call to `write` will drop all
        # incoming samples according to `policyWhenFull`.
        return nSamples - nWritten  # number of samples that overflowed
def clear(self):
# reset all live attributes
self._samples = None
self._offset = 0
self._lastSample = 0
self._spaceRemaining = None
self._totalSamples = None
# reallocate buffer
self._allocRecBuffer()
def getSegment(self, start=0, end=None):
"""Get a segment of recording data as an `AudioClip`.
Parameters
----------
start : float or int
Absolute time in seconds for the start of the clip.
end : float or int
Absolute time in seconds for the end of the clip. If `None` the time
at the last sample is used.
Returns
-------
AudioClip
Audio clip object with samples between `start` and `end`.
"""
idxStart = int(start * self._sampleRateHz)
idxEnd = self._lastSample if end is None else int(
end * self._sampleRateHz)
return AudioClip(
np.array(self._samples[idxStart:idxEnd, :],
dtype=np.float32, order='C'),
sampleRateHz=self._sampleRateHz)
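# Minimal sketch (illustration only, not part of the public API): exercising
# `RecordingBuffer` directly with synthetic samples. Real use goes through
# `Microphone`, which owns the buffer; the sizes below are arbitrary.
def _exampleRecordingBuffer():
    buf = RecordingBuffer(sampleRateHz=48000, channels=2, maxRecordingSize=1000)
    fakeBlock = np.zeros((480, 2), dtype=np.float32)  # 10 ms of silence
    dropped = buf.write(fakeBlock)  # number of samples that did not fit
    clip = buf.getSegment()  # everything recorded so far as an `AudioClip`
    return dropped, clip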
class Microphone(object):
"""Class for recording audio from a microphone or input stream.
Creating an instance of this class will open a stream using the specified
device. Streams should remain open for the duration of your session. When a
stream is opened, a buffer is allocated to store samples coming off it.
Samples from the input stream will written to the buffer once
:meth:`~Microphone.start()` is called.
Parameters
----------
device : int or `~psychopy.sound.AudioDevice`
Audio capture device to use. You may specify the device either by index
(`int`) or descriptor (`AudioDevice`).
sampleRateHz : int
Sampling rate for audio recording in Hertz (Hz). By default, 48kHz
        (``sampleRateHz=48000``) is used which is adequate for most consumer
grade microphones (headsets and built-in).
channels : int
Number of channels to record samples to `1=Mono` and `2=Stereo`.
streamBufferSecs : float
Stream buffer size to pre-allocate for the specified number of seconds.
The default is 2.0 seconds which is usually sufficient.
maxRecordingSize : int
Maximum recording size in kilobytes (Kb). Since audio recordings tend to
consume a large amount of system memory, one might want to limit the
size of the recording buffer to ensure that the application does not run
out of memory. By default, the recording buffer is set to 24000 KB (or
24 MB). At a sample rate of 48kHz, this will result in 62.5 seconds of
continuous audio being recorded before the buffer is full.
audioLatencyMode : int or None
Audio latency mode to use, values range between 0-4. If `None`, the
setting from preferences will be used. Using `3` (exclusive mode) is
adequate for most applications and required if using WASAPI on Windows
        for other settings (such as audio quality) to take effect. Symbolic
constants `psychopy.sound.audiodevice.AUDIO_PTB_LATENCY_CLASS_` can also
be used.
audioRunMode : int
Run mode for the recording device. Default is standby-mode (`0`) which
allows the system to put the device to sleep. However when the device is
needed, waking the device results in some latency. Using a run mode of
        `1` will keep the microphone running (or 'hot') with reduced latency
        when the recording is started. Cannot be set after initialization at
this time.
Examples
--------
Capture 10 seconds of audio from the primary microphone::
import psychopy.core as core
import psychopy.sound.Microphone as Microphone
        mic = Microphone(streamBufferSecs=10.0)  # open the microphone
mic.start() # start recording
core.wait(10.0) # wait 10 seconds
mic.stop() # stop recording
audioClip = mic.getRecording()
print(audioClip.duration) # should be ~10 seconds
audioClip.save('test.wav') # save the recorded audio as a 'wav' file
The prescribed method for making long recordings is to poll the stream once
per frame (or every n-th frame)::
        mic = Microphone(streamBufferSecs=2.0)
mic.start() # start recording
# main trial drawing loop
mic.poll()
win.flip() # calling the window flip function
mic.stop() # stop recording
audioClip = mic.getRecording()
"""
# Force the use of WASAPI for audio capture on Windows. If `True`, only
# WASAPI devices will be returned when calling static method
# `Microphone.getDevices()`
enforceWASAPI = True
def __init__(self,
device=None,
sampleRateHz=None,
channels=2,
streamBufferSecs=2.0,
maxRecordingSize=24000,
policyWhenFull='warn',
audioLatencyMode=None,
audioRunMode=0):
if not _hasPTB: # fail if PTB is not installed
raise ModuleNotFoundError(
"Microphone audio capture requires package `psychtoolbox` to "
"be installed.")
# get information about the selected device
devices = Microphone.getDevices()
if isinstance(device, AudioDeviceInfo):
self._device = device
elif isinstance(device, (int, float)):
devicesByIndex = {d.deviceIndex: d for d in devices}
if device in devicesByIndex:
self._device = devicesByIndex[device]
else:
raise AudioInvalidCaptureDeviceError(
'No suitable audio recording devices found matching index '
'{}.'.format(device))
else:
# get default device, first enumerated usually
if not devices:
raise AudioInvalidCaptureDeviceError(
'No suitable audio recording devices found on this system. '
'Check connections and try again.')
self._device = devices[0] # use first
logging.info('Using audio device #{} ({}) for audio capture'.format(
self._device.deviceIndex, self._device.deviceName))
# error if specified device is not suitable for capture
if not self._device.isCapture:
raise AudioInvalidCaptureDeviceError(
'Specified audio device not suitable for audio recording. '
'Has no input channels.')
# get the sample rate
self._sampleRateHz = \
self._device.defaultSampleRate if sampleRateHz is None else int(
sampleRateHz)
logging.debug('Set stream sample rate to {} Hz'.format(
self._sampleRateHz))
# set the audio latency mode
if audioLatencyMode is None:
self._audioLatencyMode = int(prefs.hardware["audioLatencyMode"])
else:
self._audioLatencyMode = audioLatencyMode
logging.debug('Set audio latency mode to {}'.format(
self._audioLatencyMode))
assert 0 <= self._audioLatencyMode <= 4 # sanity check for pref
# set the number of recording channels
self._channels = \
self._device.inputChannels if channels is None else int(channels)
logging.debug('Set recording channels to {} ({})'.format(
self._channels, 'stereo' if self._channels > 1 else 'mono'))
if self._channels > self._device.inputChannels:
raise AudioInvalidDeviceError(
'Invalid number of channels for audio input specified.')
# internal recording buffer size in seconds
assert isinstance(streamBufferSecs, (float, int))
self._streamBufferSecs = float(streamBufferSecs)
# PTB specific stuff
self._mode = 2 # open a stream in capture mode
# Handle for the recording stream, should only be opened once per
# session
logging.debug('Opening audio stream for device #{}'.format(
self._device.deviceIndex))
self._stream = audio.Stream(
device_id=self._device.deviceIndex,
latency_class=self._audioLatencyMode,
mode=self._mode,
freq=self._sampleRateHz,
channels=self._channels)
logging.debug('Stream opened')
assert isinstance(audioRunMode, (float, int)) and \
(audioRunMode == 0 or audioRunMode == 1)
self._audioRunMode = int(audioRunMode)
self._stream.run_mode = self._audioRunMode
logging.debug('Set run mode to `{}`'.format(
self._audioRunMode))
# set latency bias
self._stream.latency_bias = 0.0
logging.debug('Set stream latency bias to {} ms'.format(
self._stream.latency_bias))
# pre-allocate recording buffer, called once
self._stream.get_audio_data(self._streamBufferSecs)
logging.debug(
'Allocated stream buffer to hold {} seconds of data'.format(
self._streamBufferSecs))
# status flag
self._statusFlag = NOT_STARTED
# setup recording buffer
self._recording = RecordingBuffer(
sampleRateHz=self._sampleRateHz,
channels=self._channels,
maxRecordingSize=maxRecordingSize,
policyWhenFull=policyWhenFull
)
# setup clips and transcripts dicts
self.clips = {}
self.lastClip = None
self.scripts = {}
self.lastScript = None
logging.debug('Audio capture device #{} ready'.format(
self._device.deviceIndex))
@staticmethod
def getDevices():
"""Get a `list` of audio capture device (i.e. microphones) descriptors.
On Windows, only WASAPI devices are used.
Returns
-------
list
List of `AudioDevice` descriptors for suitable capture devices. If
empty, no capture devices have been found.
"""
try:
Microphone.enforceWASAPI = bool(prefs.hardware["audioForceWASAPI"])
except KeyError:
pass # use default if option not present in settings
# query PTB for devices
if Microphone.enforceWASAPI and sys.platform == 'win32':
allDevs = audio.get_devices(device_type=13)
else:
allDevs = audio.get_devices()
# make sure we have an array of descriptors
allDevs = [allDevs] if isinstance(allDevs, dict) else allDevs
# create list of descriptors only for capture devices
inputDevices = [desc for desc in [
AudioDeviceInfo.createFromPTBDesc(dev) for dev in allDevs]
if desc.isCapture]
return inputDevices
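    # Hedged usage sketch (illustration only): listing capture devices before
    # opening one.
    #     devs = Microphone.getDevices()
    #     for dev in devs:
    #         print(dev.deviceIndex, dev.deviceName)
    #     mic = Microphone(device=devs[0]) if devs else None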
# def warmUp(self):
# """Warm-/wake-up the audio stream.
#
# On some systems the first time `start` is called incurs additional
# latency, whereas successive calls do not. To deal with this, it is
# recommended that you run this warm-up routine prior to capturing audio
# samples. By default, this routine is called when instancing a new
# microphone object.
#
# """
# # We should put an actual test here to see if timing stabilizes after
# # multiple invocations of this function.
# self._stream.start()
# self._stream.stop()
@property
def recording(self):
"""Reference to the current recording buffer (`RecordingBuffer`)."""
return self._recording
@property
def recBufferSecs(self):
"""Capacity of the recording buffer in seconds (`float`)."""
return self.recording.bufferSecs
@property
def maxRecordingSize(self):
"""Maximum recording size in kilobytes (`int`).
Since audio recordings tend to consume a large amount of system memory,
one might want to limit the size of the recording buffer to ensure that
the application does not run out. By default, the recording buffer is
        set to 24000 KB (or 24 MB). At a sample rate of 48kHz, this will result
        in about 62.5 seconds of continuous audio. Using stereo audio
        (``nChannels == 2``) requires twice the buffer over mono
        (``nChannels == 1``) for the same length clip.
Setting this value will allocate another recording buffer of appropriate
size. Avoid doing this in any time sensitive parts of your application.
"""
return self._recording.maxRecordingSize
@maxRecordingSize.setter
def maxRecordingSize(self, value):
self._recording.maxRecordingSize = value
@property
def latencyBias(self):
"""Latency bias to add when starting the microphone (`float`).
"""
return self._stream.latency_bias
@latencyBias.setter
def latencyBias(self, value):
self._stream.latency_bias = float(value)
@property
def audioLatencyMode(self):
"""Audio latency mode in use (`int`). Cannot be set after
initialization.
"""
return self._audioLatencyMode
@property
def streamBufferSecs(self):
"""Size of the internal audio storage buffer in seconds (`float`).
To ensure all data is captured, there must be less time elapsed between
        subsequent `poll` calls than `streamBufferSecs`.
"""
return self._streamBufferSecs
@property
def status(self):
"""Status flag for the microphone. Value can be one of
``psychopy.constants.STARTED`` or ``psychopy.constants.NOT_STARTED``.
For detailed stream status information, use the
:attr:`~psychopy.sound.microphone.Microphone.streamStatus` property.
"""
if hasattr(self, "_statusFlag"):
return self._statusFlag
@status.setter
def status(self, value):
self._statusFlag = value
@property
def streamStatus(self):
"""Status of the audio stream (`AudioDeviceStatus` or `None`).
See :class:`~psychopy.sound.AudioDeviceStatus` for a complete overview
of available status fields. This property has a value of `None` if
the stream is presently closed.
Examples
--------
Get the capture start time of the stream::
# assumes mic.start() was called
captureStartTime = mic.status.captureStartTime
Check if microphone recording is active::
isActive = mic.status.active
Get the number of seconds recorded up to this point::
recordedSecs = mic.status.recordedSecs
"""
currentStatus = self._stream.status
if currentStatus != -1:
return AudioDeviceStatus.createFromPTBDesc(currentStatus)
@property
def isRecBufferFull(self):
"""`True` if there is an overflow condition with the recording buffer.
If this is `True`, then `poll()` is still collecting stream samples but
is no longer writing them to anything, causing stream samples to be
lost.
"""
return self._recording.isFull
@property
def isStarted(self):
"""``True`` if stream recording has been started (`bool`)."""
return self.status == STARTED
def start(self, when=None, waitForStart=0, stopTime=None):
"""Start an audio recording.
Calling this method will begin capturing samples from the microphone and
writing them to the buffer.
Parameters
----------
when : float, int or None
When to start the stream. If the time specified is a floating point
(absolute) system time, the device will attempt to begin recording
at that time. If `None` or zero, the system will try to start
recording as soon as possible.
waitForStart : bool
Wait for sound onset if `True`.
stopTime : float, int or None
Number of seconds to record. If `None` or `-1`, recording will
continue forever until `stop` is called.
Returns
-------
float
Absolute time the stream was started.
"""
        # check if the stream has already been started
if self.isStarted:
raise AudioStreamError(
"Cannot start a stream, already started.")
if self._stream is None:
raise AudioStreamError("Stream not ready.")
# reset the writing 'head'
self._recording.seek(0, absolute=True)
# reset warnings
# self._warnedRecBufferFull = False
startTime = self._stream.start(
repetitions=0,
when=when,
wait_for_start=int(waitForStart),
stop_time=stopTime)
# recording has begun or is scheduled to do so
self._statusFlag = STARTED
logging.debug(
'Scheduled start of audio capture for device #{} at t={}.'.format(
self._device.deviceIndex, startTime))
return startTime
def record(self, when=None, waitForStart=0, stopTime=None):
"""Start an audio recording (alias of `.start()`).
Calling this method will begin capturing samples from the microphone and
writing them to the buffer.
Parameters
----------
when : float, int or None
When to start the stream. If the time specified is a floating point
(absolute) system time, the device will attempt to begin recording
at that time. If `None` or zero, the system will try to start
recording as soon as possible.
waitForStart : bool
Wait for sound onset if `True`.
stopTime : float, int or None
Number of seconds to record. If `None` or `-1`, recording will
continue forever until `stop` is called.
Returns
-------
float
Absolute time the stream was started.
"""
return self.start(
when=when,
waitForStart=waitForStart,
stopTime=stopTime)
def stop(self, blockUntilStopped=True, stopTime=None):
"""Stop recording audio.
Call this method to end an audio recording if in progress. This will
simply halt recording and not close the stream. Any remaining samples
will be polled automatically and added to the recording buffer.
Parameters
----------
blockUntilStopped : bool
Halt script execution until the stream has fully stopped.
stopTime : float or None
Scheduled stop time for the stream in system time. If `None`, the
stream will stop as soon as possible.
Returns
-------
tuple
Tuple containing `startTime`, `endPositionSecs`, `xruns` and
`estStopTime`.
"""
if not self.isStarted:
raise AudioStreamError(
"Cannot stop a stream that has not been started.")
# poll remaining samples, if any
if not self.isRecBufferFull:
self.poll()
startTime, endPositionSecs, xruns, estStopTime = self._stream.stop(
block_until_stopped=int(blockUntilStopped),
            stop_time=stopTime)
self._statusFlag = NOT_STARTED
logging.debug(
('Device #{} stopped capturing audio samples at estimated time '
't={}. Total overruns: {} Total recording time: {}').format(
self._device.deviceIndex, estStopTime, xruns, endPositionSecs))
return startTime, endPositionSecs, xruns, estStopTime
def pause(self, blockUntilStopped=True, stopTime=None):
"""Pause a recording (alias of `.stop`).
Call this method to end an audio recording if in progress. This will
simply halt recording and not close the stream. Any remaining samples
will be polled automatically and added to the recording buffer.
Parameters
----------
blockUntilStopped : bool
Halt script execution until the stream has fully stopped.
stopTime : float or None
Scheduled stop time for the stream in system time. If `None`, the
stream will stop as soon as possible.
Returns
-------
tuple
Tuple containing `startTime`, `endPositionSecs`, `xruns` and
`estStopTime`.
"""
return self.stop(blockUntilStopped=blockUntilStopped, stopTime=stopTime)
def close(self):
"""Close the stream.
Should not be called until you are certain you're done with it. Ideally,
you should never close and reopen the same stream within a single
session.
"""
self._stream.close()
logging.debug('Stream closed')
def poll(self):
"""Poll audio samples.
Calling this method adds audio samples collected from the stream buffer
to the recording buffer that have been captured since the last `poll`
        call. Time between calls of this function should be less than
        `streamBufferSecs`. You do not need to call this if you call `stop`
        before the time specified by `streamBufferSecs` elapses since the
        `start` call.
        Can only be called between calls of `start` (or `record`) and `stop`
(or `pause`).
Returns
-------
int
Number of overruns in sampling.
"""
if not self.isStarted:
raise AudioStreamError(
"Cannot poll samples from audio device, not started.")
# figure out what to do with this other information
audioData, absRecPosition, overflow, cStartTime = \
self._stream.get_audio_data()
if overflow:
logging.warning(
"Audio stream buffer overflow, some audio samples have been "
"lost! To prevent this, ensure `Microphone.poll()` is being "
"called often enough, or increase the size of the audio buffer "
"with `bufferSecs`.")
overruns = self._recording.write(audioData)
return overruns
def bank(self, tag=None, transcribe=False, **kwargs):
"""Store current buffer as a clip within the microphone object.
This method is used internally by the Microphone component in Builder,
don't use it for other applications. Either `stop()` or `pause()` must
be called before calling this method.
Parameters
----------
tag : str or None
Label for the clip.
transcribe : bool or str
Set to the name of a transcription engine (e.g. "GOOGLE") to
transcribe using that engine, or set as `False` to not transcribe.
kwargs : dict
Additional keyword arguments to pass to
:class:`~psychopy.sound.AudioClip.transcribe()`.
"""
# make sure the tag exists in both clips and transcripts dicts
if tag not in self.clips:
self.clips[tag] = []
if tag not in self.scripts:
self.scripts[tag] = []
# append current recording to clip list according to tag
self.lastClip = self.getRecording()
self.clips[tag].append(self.lastClip)
# append current clip's transcription according to tag
if transcribe:
if transcribe in ('Built-in', True, 'BUILT_IN', 'BUILT-IN',
'Built-In', 'built-in'):
engine = "sphinx"
elif type(transcribe) == str:
engine = transcribe
self.lastScript = self.lastClip.transcribe(
engine=engine, **kwargs)
else:
self.lastScript = "Transcription disabled."
self.scripts[tag].append(self.lastScript)
# clear recording buffer
self._recording.clear()
# return banked items
if transcribe:
return self.lastClip, self.lastScript
else:
return self.lastClip
def clear(self):
"""Wipe all clips. Deletes previously banked audio clips.
"""
# clear clips
self.clips = {}
# clear recording
self._recording.clear()
def flush(self):
"""Get a copy of all banked clips, then clear the clips from storage."""
# get copy of clips dict
clips = self.clips.copy()
# clear
self.clear()
return clips
def getRecording(self):
"""Get audio data from the last microphone recording.
Call this after `stop` to get the recording as an `AudioClip` object.
Raises an error if a recording is in progress.
Returns
-------
AudioClip
Recorded data between the last calls to `start` (or `record`) and
`stop`.
"""
if self.isStarted:
raise AudioStreamError(
"Cannot get audio clip, recording was in progress. Be sure to "
"call `Microphone.stop` first.")
return self._recording.getSegment() # full recording
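# Hedged usage sketch (assumes a working capture device and `psychtoolbox`):
# the frame-loop recording pattern described in the class docstring, with the
# window flip replaced by a bare loop. `nFrames` is an arbitrary placeholder.
def _exampleFrameLoopRecording(nFrames=600):
    mic = Microphone(streamBufferSecs=2.0)
    mic.start()
    for _ in range(nFrames):
        mic.poll()  # move stream samples into the recording buffer each frame
    mic.stop()
    return mic.getRecording()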
if __name__ == "__main__":
pass
|
gpl-3.0
| 1,498,106,364,352,994,300
| 34.909184
| 80
| 0.613424
| false
| 4.634664
| false
| false
| false
|
sipwise/repoapi
|
build/test/test_utils.py
|
1
|
11161
|
# Copyright (C) 2017-2020 The Sipwise Team - http://sipwise.com
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import re
from unittest.mock import patch
from django.test import override_settings
from django.test import SimpleTestCase
from build import exceptions as err
from build.conf import settings
from build.utils import get_common_release
from build.utils import get_simple_release
from build.utils import is_release_trunk
from build.utils import ReleaseConfig
from build.utils import trigger_build
from build.utils import trigger_copy_deps
class SimpleIsReleaseTrunkTest(SimpleTestCase):
def test_trunk(self):
ok, val = is_release_trunk("trunk")
self.assertFalse(ok)
self.assertIsNone(val)
def test_mrXX(self):
ok, val = is_release_trunk("release-mr8.5")
self.assertFalse(ok)
self.assertIsNone(val)
def test_release_trunk(self):
ok, val = is_release_trunk("release-trunk-buster")
self.assertTrue(ok)
self.assertEqual(val, "buster")
ok, val = is_release_trunk("release-trunk-bullseye")
self.assertTrue(ok)
self.assertEqual(val, "bullseye")
class SimpleReleaseTest(SimpleTestCase):
def test_trunk(self):
val = get_simple_release("release-trunk-buster")
self.assertEqual(val, "trunk")
def test_branch_release(self):
val = get_simple_release("release-mr8.0")
self.assertEqual(val, "mr8.0")
def test_release_ok(self):
val = get_simple_release("release-mr8.1.1")
self.assertEqual(val, "mr8.1.1")
def test_release_update_ok(self):
val = get_simple_release("release-mr8.1-update")
self.assertEqual(val, "mr8.1")
def test_release_ko(self):
val = get_simple_release("mr8.1.1")
self.assertIsNone(val)
class CommonReleaseTest(SimpleTestCase):
def test_trunk(self):
val = get_common_release("release-trunk-buster")
self.assertEqual(val, "master")
def test_branch_release(self):
val = get_common_release("release-mr8.0")
self.assertEqual(val, "mr8.0")
def test_release_ok(self):
val = get_common_release("mr8.1.1")
self.assertEqual(val, "mr8.1")
def test_release_ko(self):
val = get_common_release("whatever-mr8.1.1")
self.assertIsNone(val)
class ReleaseConfigTestCase(SimpleTestCase):
build_deps = [
"data-hal",
"ngcp-schema",
"libinewrate",
"libswrate",
"libtcap",
"sipwise-base",
"check-tools",
]
@override_settings(BUILD_RELEASES_SKIP=["mr0.1"])
def test_supported_releases(self):
supported = [
"release-trunk-buster",
"release-trunk-bullseye",
"mr8.1.2",
"mr8.1",
"mr7.5.3",
"mr7.5.2",
"mr7.5.1",
"mr7.5",
]
res = ReleaseConfig.supported_releases()
self.assertListEqual(res, supported)
@patch.object(ReleaseConfig, "supported_releases")
def test_supported_releases_dict(self, sr):
res_ok = [
{"release": "release-trunk-buster", "base": "master"},
{"release": "mr8.0", "base": "mr8.0"},
{"release": "mr8.0.1", "base": "mr8.0"},
{"release": "mr7.5.1", "base": "mr7.5"},
]
sr.return_value = [
"release-trunk-buster",
"mr8.0",
"mr8.0.1",
"mr7.5.1",
]
res = ReleaseConfig.supported_releases_dict()
self.assertListEqual(res, res_ok)
def test_no_release_config(self):
with self.assertRaises(err.NoConfigReleaseFile):
ReleaseConfig("fake_release")
def test_no_jenkins_jobs(self):
with self.assertRaises(err.NoJenkinsJobsInfo):
ReleaseConfig("mr0.1")
def test_ok(self):
rc = ReleaseConfig("trunk")
self.assertIsNotNone(rc.config)
self.assertListEqual(list(rc.build_deps.keys()), self.build_deps)
self.assertEqual(rc.debian_release, "buster")
self.assertEqual(len(rc.projects), 73)
def test_debian_release_value(self):
rc = ReleaseConfig("trunk")
self.assertEqual(rc.debian_release, "buster")
rc = ReleaseConfig("release-trunk-bullseye")
self.assertEqual(rc.debian_release, "bullseye")
rc = ReleaseConfig("trunk", "bullseye")
self.assertEqual(rc.debian_release, "bullseye")
# distribution parameter is only used with trunk
rc = ReleaseConfig("release-mr8.1-update", "bullseye")
self.assertEqual(rc.debian_release, "buster")
def test_release_value(self):
rc = ReleaseConfig("trunk")
self.assertEqual(rc.release, "trunk")
def test_branch_tag_value_trunk(self):
rc = ReleaseConfig("trunk")
self.assertEqual(rc.branch, "master")
self.assertIsNone(rc.tag)
def test_branch_tag_value_mrXX(self):
rc = ReleaseConfig("mr8.1")
self.assertEqual(rc.branch, "mr8.1")
self.assertIsNone(rc.tag)
def test_branch_tag_value_mrXXX(self):
rc = ReleaseConfig("mr7.5.2")
self.assertEqual(rc.branch, "mr7.5.2")
self.assertEqual(rc.tag, "mr7.5.2.1")
def test_build_deps(self):
rc = ReleaseConfig("trunk")
build_deps = [
"data-hal",
"ngcp-schema",
"libinewrate",
"libswrate",
"libtcap",
"sipwise-base",
"check-tools",
]
self.assertListEqual(list(rc.build_deps.keys()), build_deps)
def test_build_deps_iter_step_1(self):
rc = ReleaseConfig("trunk")
build_deps = [
"data-hal",
"libinewrate",
"libswrate",
"libtcap",
"sipwise-base",
"check-tools",
]
values = []
for prj in rc.wanna_build_deps(0):
values.append(prj)
self.assertListEqual(build_deps, values)
def test_build_deps_iter_step_2(self):
rc = ReleaseConfig("trunk")
values = []
for prj in rc.wanna_build_deps(1):
values.append(prj)
self.assertListEqual(["ngcp-schema"], values)
@patch("build.utils.open_jenkins_url")
class TriggerBuild(SimpleTestCase):
def test_project_build(self, openurl):
params = {
"project": "kamailio-get-code",
"release_uuid": "UUID_mr8.2",
"trigger_release": "release-mr8.2",
"trigger_branch_or_tag": "branch/mr8.2",
"trigger_distribution": "buster",
"uuid": "UUID_A",
}
url = (
"{base}/job/{project}/buildWithParameters?"
"token={token}&cause={trigger_release}&uuid={uuid}&"
"release_uuid={release_uuid}&"
"branch=mr8.2&tag=none&"
"release={trigger_release}&distribution={trigger_distribution}"
)
res = trigger_build(**params)
params["base"] = settings.JENKINS_URL
params["token"] = settings.JENKINS_TOKEN
self.assertEqual(res, "{base}/job/{project}/".format(**params))
openurl.assert_called_once_with(url.format(**params))
def test_project_build_uuid(self, openurl):
params = {
"project": "kamailio-get-code",
"release_uuid": "UUID_mr8.2",
"trigger_release": "release-mr8.2",
"trigger_branch_or_tag": "branch/mr8.2",
"trigger_distribution": "buster",
}
res = [trigger_build(**params), trigger_build(**params)]
params["base"] = settings.JENKINS_URL
params["token"] = settings.JENKINS_TOKEN
self.assertEqual(res[0], "{base}/job/{project}/".format(**params))
self.assertEqual(res[0], res[1])
uuids = list()
self.assertEqual(len(openurl.call_args_list), 2)
for call in openurl.call_args_list:
m = re.match(r".+&uuid=([^&]+)&.+", str(call))
self.assertIsNotNone(m)
            uuids.append(m.group(1))
self.assertNotEqual(uuids[0], uuids[1])
def test_copy_debs_build(self, openurl):
params = {
"release": "release-mr8.2",
"internal": True,
"release_uuid": "UUID_mr8.2",
"uuid": "UUID_A",
}
url = (
"{base}/job/{project}/buildWithParameters?"
"token={token}&cause={release}&uuid={uuid}&"
"release_uuid={release_uuid}&"
"release=mr8.2&internal=true"
)
res = trigger_copy_deps(**params)
params["project"] = "release-copy-debs-yml"
params["base"] = settings.JENKINS_URL
params["token"] = settings.JENKINS_TOKEN
self.assertEqual(res, "{base}/job/{project}/".format(**params))
openurl.assert_called_once_with(url.format(**params))
def test_project_build_trunk(self, openurl):
params = {
"project": "kamailio-get-code",
"release_uuid": "UUID_mr8.2",
"trigger_release": "trunk",
"trigger_branch_or_tag": "branch/master",
"trigger_distribution": "buster",
"uuid": "UUID_A",
}
url = (
"{base}/job/{project}/buildWithParameters?"
"token={token}&cause={trigger_release}&uuid={uuid}&"
"release_uuid={release_uuid}&"
"branch=master&tag=none&"
"release=trunk&distribution={trigger_distribution}"
)
res = trigger_build(**params)
params["base"] = settings.JENKINS_URL
params["token"] = settings.JENKINS_TOKEN
self.assertEqual(res, "{base}/job/{project}/".format(**params))
openurl.assert_called_once_with(url.format(**params))
def test_copy_debs_build_trunk(self, openurl):
params = {
"release": "release-trunk-buster",
"internal": True,
"release_uuid": "UUID_master",
"uuid": "UUID_B",
}
url = (
"{base}/job/{project}/buildWithParameters?"
"token={token}&cause={release}&uuid={uuid}&"
"release_uuid={release_uuid}&"
"release=release-trunk-buster&internal=true"
)
res = trigger_copy_deps(**params)
params["project"] = "release-copy-debs-yml"
params["base"] = settings.JENKINS_URL
params["token"] = settings.JENKINS_TOKEN
self.assertEqual(res, "{base}/job/{project}/".format(**params))
openurl.assert_called_once_with(url.format(**params))
|
gpl-3.0
| -7,227,835,457,043,220,000
| 33.55418
| 77
| 0.585431
| false
| 3.58644
| true
| false
| false
|
grovesdixon/metaTranscriptomes
|
scripts/parse_codeml_pairwise_outputBACKUP.py
|
1
|
6189
|
#!/usr/bin/env python
##parse_codeml_pairwise_output.py
##written 6/26/14 by Groves Dixon
ProgramName = 'parse_codeml_pairwise_output.py'
LastUpdated = '6/26/14'
By = 'Groves Dixon'
VersionNumber = '1.0'
print "\nRunning Program {}...".format(ProgramName)
VersionString = '{} version {} Last Updated {} by {}'.format(ProgramName, VersionNumber, LastUpdated, By)
Description = '''
Description:
Parses a list of codeml output files that were generated using pair-wise
dN/dS estimation (runmode -2). Pairs are set up against one base species
(set as spp1) and all other species (a list file)
'''
AdditionalProgramInfo = '''
Additional Program Information:
'''
##Import Modules
import time
import argparse
from sys import argv
from sys import exit
import numpy as np
Start_time = time.time() ##keeps track of how long the script takes to run
##Set Up Argument Parsing
parser = argparse.ArgumentParser(description=Description, epilog=AdditionalProgramInfo) ##create argument parser that will automatically return help texts from global variables above
parser.add_argument('-f', required = True, dest = 'files', nargs="+", help = 'A glob to the codeml output files (probably *.codeml)')
parser.add_argument('-spp1', required = True, dest = 'spp1', help = 'The search tag for species 1')
parser.add_argument('-sppList', required = True, dest = 'sppList', help = 'The List of species to pair with species 1')
parser.add_argument('-o', required = True, dest = 'out', help = 'The desired output file name')
args = parser.parse_args()
#Assign Arguments
FileList = args.files
Spp1 = args.spp1
SppListName = args.sppList
OutfileName = args.out
SppList = []
with open(SppListName, 'r') as infile:
for line in infile:
SppList.append(line.strip("\n"))
def read_files(FileList, Spp1, SppList):
'''Function to reads through each file and parses out
dN and dS estimates for the specified species pair.
'''
print "\nLooking for data in {} codeml output files...".format(len(FileList))
geneList = []
dNList = []
dSList = []
speciesList = []
highDScount = 0
for species in SppList:
if species == Spp1:
continue
for file in FileList:
with open(file, 'r') as infile:
hit = 0
hitCount = 0 #this should never exceed 1
for line in infile:
if hitCount > 1:
exit("Found more than one instance of pairing in a file. Something is wrong.")
if hit == 0:
##look for your species pair
if "("+Spp1+")" in line:
if "("+species+")" in line:
if "..." in line:
hit = 1
continue
elif hit == 1:
if "dN/dS=" in line:
line = line.split()
try:
dn = line[10]
ds = line[13]
except IndexError: #occurs sometimes when dS is very large
#the dn value is also sometimes so high it must be split differently
                                #this probably means it's a bad alignment/ortholog call, but parse it anyway
try:
dn = line[10]
ds = line[12]
                                    #it's rare, but possible that N is double digits and S is not, so only "strip" the = from the front of ds if it's there
if "=" in ds:
ds = ds.split('=')[1] #split the large ds value assuming that dS is >= 10.0 but dN is not
except IndexError:
dn = line[9].split('=')[1] #this means that the dN value was also >= 10.0, so grab it differently
ds = line[11].split('=')[1] #dS is also in a different place because of the big dN, so grab it
                            geneName = file.replace(".codeml", "")  # strip() removes a set of characters, not the suffix
geneList.append(geneName)
dNList.append(dn)
dSList.append(ds)
speciesList.append(species)
hit = 0
hitCount += 1
# print geneName
# print species
# print dn
return geneList, dNList, dSList, speciesList
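# For reference, the parser above assumes pairwise codeml summary lines of
# (approximately) the form below, where whitespace-split tokens 10 and 13
# carry dN and dS; this format is inferred from the indexing logic, not from
# the codeml documentation:
#   t= 0.6286  S= 186.2  N= 557.8  dN/dS= 0.0649  dN = 0.0247  dS = 0.3804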
def output(OutfileName, geneList, dNList, dSList, speciesList):
"""Outputs the data into a table"""
badValues = []
lineNums = []
with open(OutfileName, 'w') as out:
out.write("EST\tspecies\tdN\tdS")
for i in range(len(geneList)):
#########
##there is a bug that occurs when the synonymous substitution rate is >99.99
#these are obviously wrong anyway and they stop the output from uploading into R so skip them
fourData = 'TRUE'
outList = [geneList[i], speciesList[i], dNList[i], dSList[i]]
try:
float(dNList[i])
float(dSList[i])
except ValueError:
badValues.append([dNList[i], dSList[i]])
lineNums.append(i)
continue
for x in outList:
if x == "":
fourData = 'FALSE'
if fourData == 'FALSE':
continue
###########
            outString = "\n{}\t{}\t{}\t{}".format(geneList[i], speciesList[i], dNList[i], dSList[i])
            out.write(outString)
geneList, dNList, dSList, speciesList = read_files(FileList, Spp1, SppList)
output(OutfileName, geneList, dNList, dSList, speciesList)
#return time to run
Time = time.time() - Start_time
print('\nTime taken to run: {}'.format(Time))
|
mit
| 5,886,263,383,535,908,000
| 40.536913
| 182
| 0.527549
| false
| 4.207342
| false
| false
| false
|
Wintermute0110/plugin.program.advanced.emulator.launcher
|
tests/fakes.py
|
1
|
8256
|
from abc import ABCMeta, abstractmethod
from resources.objects import *
from resources.utils import *
from resources.scrap import *
class FakeRomSetRepository(ROMSetRepository):
def __init__(self, roms):
self.roms = roms
def find_by_launcher(self, launcher):
return self.roms
def save_rom_set(self, launcher, roms):
self.roms = roms
def delete_all_by_launcher(self, launcher):
self.roms = {}
class FakeExecutor(ExecutorABC):
def __init__(self):
self.actualApplication = None
self.actualArgs = None
super(FakeExecutor, self).__init__(None)
def getActualApplication(self):
return self.actualApplication
def getActualArguments(self):
return self.actualArgs
def execute(self, application, arguments, non_blocking):
self.actualApplication = application
self.actualArgs = arguments
pass
class FakeClass():
def FakeMethod(self, value, key, launcher):
self.value = value
class FakeFile(FileName):
def __init__(self, pathString):
self.fakeContent = ''
self.path_str = pathString
self.path_tr = pathString
self.exists = self.exists_fake
self.write = self.write_fake
def setFakeContent(self, content):
self.fakeContent = content
def getFakeContent(self):
return self.fakeContent
def loadFileToStr(self, encoding = 'utf-8'):
return self.fakeContent
def readAllUnicode(self, encoding='utf-8'):
contents = unicode(self.fakeContent)
return contents
def saveStrToFile(self, data_str, encoding = 'utf-8'):
self.fakeContent = data_str
def write_fake(self, bytes):
self.fakeContent = self.fakeContent + bytes
def open(self, mode):
pass
def close(self):
pass
def writeAll(self, bytes, flags='w'):
self.fakeContent = self.fakeContent + bytes
def pjoin(self, *args):
child = FakeFile(self.path_str)
child.setFakeContent(self.fakeContent)
for arg in args:
child.path_str = os.path.join(child.path_str, arg)
child.path_tr = os.path.join(child.path_tr, arg)
return child
def switchExtension(self, targetExt):
switched_fake = super(FakeFile, self).switchExtension(targetExt)
#switched_fake = FakeFile(switched_type.getPath())
switched_fake.setFakeContent(self.fakeContent)
return switched_fake
def exists_fake(self):
return True
def scanFilesInPathAsFileNameObjects(self, mask = '*.*'):
return []
    #backwards compatibility
def __create__(self, path):
return FakeFile(path)
class Fake_Paths:
def __init__(self, fake_base, fake_addon_id = 'ael-tests'):
# --- Base paths ---
self.ADDONS_DATA_DIR = FileName(fake_base, isdir = True)
self.ADDON_DATA_DIR = self.ADDONS_DATA_DIR.pjoin(fake_addon_id, isdir = True)
self.PROFILE_DIR = self.ADDONS_DATA_DIR.pjoin('profile', isdir = True)
self.HOME_DIR = self.ADDONS_DATA_DIR.pjoin('home', isdir = True)
self.ADDONS_DIR = self.HOME_DIR.pjoin('addons', isdir = True)
self.ADDON_CODE_DIR = self.ADDONS_DIR.pjoin(fake_addon_id, isdir = True)
self.ICON_FILE_PATH = self.ADDON_CODE_DIR.pjoin('media/icon.png')
self.FANART_FILE_PATH = self.ADDON_CODE_DIR.pjoin('media/fanart.jpg')
# --- Databases and reports ---
self.CATEGORIES_FILE_PATH = self.ADDON_DATA_DIR.pjoin('categories.xml')
self.FAV_JSON_FILE_PATH = self.ADDON_DATA_DIR.pjoin('favourites.json')
self.COLLECTIONS_FILE_PATH = self.ADDON_DATA_DIR.pjoin('collections.xml')
self.VCAT_TITLE_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_title.xml')
self.VCAT_YEARS_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_years.xml')
self.VCAT_GENRE_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_genre.xml')
self.VCAT_DEVELOPER_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_developers.xml')
self.VCAT_NPLAYERS_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_nplayers.xml')
self.VCAT_ESRB_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_esrb.xml')
self.VCAT_RATING_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_rating.xml')
self.VCAT_CATEGORY_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_category.xml')
# Launcher app stdout/stderr file
self.LAUNCH_LOG_FILE_PATH = self.ADDON_DATA_DIR.pjoin('launcher.log')
self.RECENT_PLAYED_FILE_PATH = self.ADDON_DATA_DIR.pjoin('history.json')
self.MOST_PLAYED_FILE_PATH = self.ADDON_DATA_DIR.pjoin('most_played.json')
self.BIOS_REPORT_FILE_PATH = self.ADDON_DATA_DIR.pjoin('report_BIOS.txt')
self.LAUNCHER_REPORT_FILE_PATH = self.ADDON_DATA_DIR.pjoin('report_Launchers.txt')
# --- Offline scraper databases ---
self.GAMEDB_INFO_DIR = self.ADDON_CODE_DIR.pjoin('GameDBInfo', isdir = True)
self.GAMEDB_JSON_BASE_NOEXT = 'GameDB_info'
self.LAUNCHBOX_INFO_DIR = self.ADDON_CODE_DIR.pjoin('LaunchBox', isdir = True)
self.LAUNCHBOX_JSON_BASE_NOEXT = 'LaunchBox_info'
# --- Artwork and NFO for Categories and Launchers ---
self.CATEGORIES_ASSET_DIR = self.ADDON_DATA_DIR.pjoin('asset-categories', isdir = True)
self.COLLECTIONS_ASSET_DIR = self.ADDON_DATA_DIR.pjoin('asset-collections', isdir = True)
self.LAUNCHERS_ASSET_DIR = self.ADDON_DATA_DIR.pjoin('asset-launchers', isdir = True)
self.FAVOURITES_ASSET_DIR = self.ADDON_DATA_DIR.pjoin('asset-favourites', isdir = True)
self.VIRTUAL_CAT_TITLE_DIR = self.ADDON_DATA_DIR.pjoin('db_title', isdir = True)
self.VIRTUAL_CAT_YEARS_DIR = self.ADDON_DATA_DIR.pjoin('db_year', isdir = True)
self.VIRTUAL_CAT_GENRE_DIR = self.ADDON_DATA_DIR.pjoin('db_genre', isdir = True)
self.VIRTUAL_CAT_DEVELOPER_DIR = self.ADDON_DATA_DIR.pjoin('db_developer', isdir = True)
self.VIRTUAL_CAT_NPLAYERS_DIR = self.ADDON_DATA_DIR.pjoin('db_nplayer', isdir = True)
self.VIRTUAL_CAT_ESRB_DIR = self.ADDON_DATA_DIR.pjoin('db_esrb', isdir = True)
self.VIRTUAL_CAT_RATING_DIR = self.ADDON_DATA_DIR.pjoin('db_rating', isdir = True)
self.VIRTUAL_CAT_CATEGORY_DIR = self.ADDON_DATA_DIR.pjoin('db_category', isdir = True)
self.ROMS_DIR = self.ADDON_DATA_DIR.pjoin('db_ROMs', isdir = True)
self.COLLECTIONS_DIR = self.ADDON_DATA_DIR.pjoin('db_Collections', isdir = True)
self.REPORTS_DIR = self.ADDON_DATA_DIR.pjoin('reports', isdir = True)
class FakeScraper(Scraper):
def __init__(self, settings, launcher, rom_data_to_apply = None):
self.rom_data_to_apply = rom_data_to_apply
scraper_settings = ScraperSettings(1,1,False,True)
super(FakeScraper, self).__init__(scraper_settings, launcher, True, [])
def getName(self):
return 'FakeScraper'
def supports_asset_type(self, asset_info):
return True
def _get_candidates(self, searchTerm, romPath, rom):
return ['fake']
def _load_metadata(self, candidate, romPath, rom):
gamedata = self._new_gamedata_dic()
if self.rom_data_to_apply :
gamedata['title'] = self.rom_data_to_apply['m_name'] if 'm_name' in self.rom_data_to_apply else ''
gamedata['year'] = self.rom_data_to_apply['m_year'] if 'm_year' in self.rom_data_to_apply else ''
gamedata['genre'] = self.rom_data_to_apply['m_genre'] if 'm_genre' in self.rom_data_to_apply else ''
gamedata['developer'] = self.rom_data_to_apply['m_developer']if 'm_developer' in self.rom_data_to_apply else ''
gamedata['plot'] = self.rom_data_to_apply['m_plot'] if 'm_plot' in self.rom_data_to_apply else ''
        else:
            gamedata['title'] = romPath.getBase_noext()
        return gamedata
def _load_assets(self, candidate, romPath, rom):
pass
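# Hedged usage sketch (hypothetical test values): injecting canned metadata
# that `_load_metadata` copies onto the scraped game entry.
#   scraper = FakeScraper({}, launcher, rom_data_to_apply={'m_name': 'Sonic'})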
|
gpl-2.0
| -8,512,910,359,609,642,000
| 42.005208
| 123
| 0.622941
| false
| 3.251674
| false
| false
| false
|
coolbombom/CouchPotatoServer
|
couchpotato/core/downloaders/transmission/main.py
|
1
|
10725
|
from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import isInt
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta
import httplib
import json
import os.path
import re
import traceback
import urllib2
log = CPLog(__name__)
class Transmission(Downloader):
type = ['torrent', 'torrent_magnet']
log = CPLog(__name__)
def download(self, data, movie, filedata = None):
log.info('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('type')))
# Load host from config and split out port.
host = self.conf('host').split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
# Set parameters for Transmission
params = {
'paused': self.conf('paused', default = 0),
}
if len(self.conf('directory', default = '')) > 0:
folder_name = self.createFileName(data, filedata, movie)[:-len(data.get('type')) - 1]
params['download-dir'] = os.path.join(self.conf('directory', default = ''), folder_name).rstrip(os.path.sep)
torrent_params = {}
if self.conf('ratio'):
torrent_params = {
'seedRatioLimit': self.conf('ratio'),
'seedRatioMode': self.conf('ratiomode')
}
if not filedata and data.get('type') == 'torrent':
log.error('Failed sending torrent, no data')
return False
# Send request to Transmission
try:
trpc = TransmissionRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
if data.get('type') == 'torrent_magnet':
remote_torrent = trpc.add_torrent_uri(data.get('url'), arguments = params)
torrent_params['trackerAdd'] = self.torrent_trackers
else:
remote_torrent = trpc.add_torrent_file(b64encode(filedata), arguments = params)
if not remote_torrent:
return False
# Change settings of added torrents
if torrent_params:
trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params)
log.info('Torrent sent to Transmission successfully.')
return self.downloadReturnId(remote_torrent['torrent-added']['hashString'])
except:
log.error('Failed sending torrent to Transmission: %s', traceback.format_exc())
return False
def getAllDownloadStatus(self):
log.debug('Checking Transmission download status.')
# Load host from config and split out port.
host = self.conf('host').split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
# Go through Queue
try:
trpc = TransmissionRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
return_params = {
'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isFinished', 'downloadDir', 'uploadRatio']
}
queue = trpc.get_alltorrents(return_params)
except Exception, err:
log.error('Failed getting queue: %s', err)
return False
if not queue:
return []
statuses = StatusList(self)
# Get torrents status
# CouchPotato Status
#status = 'busy'
#status = 'failed'
#status = 'completed'
# Transmission Status
#status = 0 => "Torrent is stopped"
#status = 1 => "Queued to check files"
#status = 2 => "Checking files"
#status = 3 => "Queued to download"
#status = 4 => "Downloading"
#status = 4 => "Queued to seed"
#status = 6 => "Seeding"
#To do :
# add checking file
# manage no peer in a range time => fail
for item in queue['torrents']:
log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / eta=%s / uploadRatio=%s / confRatio=%s / isFinished=%s', (item['name'], item['id'], item['downloadDir'], item['hashString'], item['percentDone'], item['status'], item['eta'], item['uploadRatio'], self.conf('ratio'), item['isFinished']))
if not os.path.isdir(Env.setting('from', 'renamer')):
log.error('Renamer "from" folder doesn\'t exist.')
return
if (item['percentDone'] * 100) >= 100 and (item['status'] == 6 or item['status'] == 0) and item['uploadRatio'] > self.conf('ratio'):
try:
trpc.stop_torrent(item['hashString'], {})
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': 'completed',
'original_status': item['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': os.path.join(item['downloadDir'], item['name']),
})
if (not os.path.isdir(item['downloadDir'])) and (self.conf('from') in item['downloadDir']):
trpc.remove_torrent(item['id'], "true", {})
except Exception, err:
log.error('Failed to stop and remove torrent "%s" with error: %s', (item['name'], err))
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': 'failed',
'original_status': item['status'],
'timeleft': str(timedelta(seconds = 0)),
})
else:
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': 'busy',
'original_status': item['status'],
'timeleft': str(timedelta(seconds = item['eta'])), # Is ETA in seconds??
})
return statuses
class TransmissionRPC(object):
"""TransmissionRPC lite library"""
def __init__(self, host = 'localhost', port = 9091, username = None, password = None):
super(TransmissionRPC, self).__init__()
self.url = 'http://' + host + ':' + str(port) + '/transmission/rpc'
self.tag = 0
self.session_id = 0
self.session = {}
if username and password:
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password)
opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager), urllib2.HTTPDigestAuthHandler(password_manager))
opener.addheaders = [('User-agent', 'couchpotato-transmission-client/1.0')]
urllib2.install_opener(opener)
elif username or password:
log.debug('User or password missing, not using authentication.')
self.session = self.get_session()
def _request(self, ojson):
self.tag += 1
headers = {'x-transmission-session-id': str(self.session_id)}
request = urllib2.Request(self.url, json.dumps(ojson).encode('utf-8'), headers)
try:
open_request = urllib2.urlopen(request)
response = json.loads(open_request.read())
log.debug('request: %s', json.dumps(ojson))
log.debug('response: %s', json.dumps(response))
if response['result'] == 'success':
log.debug('Transmission action successful')
return response['arguments']
else:
log.debug('Unknown failure sending command to Transmission. Return text is: %s', response['result'])
return False
except httplib.InvalidURL, err:
log.error('Invalid Transmission host, check your config %s', err)
return False
except urllib2.HTTPError, err:
if err.code == 401:
log.error('Invalid Transmission Username or Password, check your config')
return False
elif err.code == 409:
msg = str(err.read())
try:
self.session_id = \
re.search('X-Transmission-Session-Id:\s*(\w+)', msg).group(1)
log.debug('X-Transmission-Session-Id: %s', self.session_id)
# resend request with the updated session id header
return self._request(ojson)
except:
log.error('Unable to get Transmission Session-Id %s', err)
else:
log.error('TransmissionRPC HTTPError: %s', err)
except urllib2.URLError, err:
log.error('Unable to connect to Transmission %s', err)
def get_session(self):
post_data = {'method': 'session-get', 'tag': self.tag}
return self._request(post_data)
def add_torrent_uri(self, torrent, arguments):
arguments['filename'] = torrent
post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag}
return self._request(post_data)
def add_torrent_file(self, torrent, arguments):
arguments['metainfo'] = torrent
post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag}
return self._request(post_data)
def set_torrent(self, torrent_id, arguments):
arguments['ids'] = torrent_id
post_data = {'arguments': arguments, 'method': 'torrent-set', 'tag': self.tag}
return self._request(post_data)
def get_alltorrents(self, arguments):
post_data = {'arguments': arguments, 'method': 'torrent-get', 'tag': self.tag}
return self._request(post_data)
def stop_torrent(self, torrent_id, arguments):
arguments['ids'] = torrent_id
post_data = {'arguments': arguments, 'method': 'torrent-stop', 'tag': self.tag}
return self._request(post_data)
def remove_torrent(self, torrent_id, remove_local_data, arguments):
arguments['ids'] = torrent_id
arguments['delete-local-data'] = remove_local_data
post_data = {'arguments': arguments, 'method': 'torrent-remove', 'tag': self.tag}
return self._request(post_data)
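# A short usage sketch for the RPC wrapper above; the host, port and
# credentials are placeholders, and the calls hit a live daemon:
# trpc = TransmissionRPC('localhost', port = 9091, username = 'admin', password = 'secret')
# torrents = trpc.get_alltorrents({'fields': ['id', 'name', 'percentDone']})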
|
gpl-3.0
| 1,714,337,725,172,411,100
| 41.9
| 339
| 0.554499
| false
| 4.288285
| true
| false
| false
|
astroswego/magellanic-structure
|
src/magstruct/transformations.py
|
1
|
1722
|
import numpy
from numpy import array, sin, cos
__all__ = [
'Equatorial2Cartesian',
'Rotation3D',
'rotation_matrix_3d'
]
class Equatorial2Cartesian():
def __init__(self, RA_0, Dec_0, D_0):
self.RA_0 = RA_0
self.Dec_0 = Dec_0
self.D_0 = D_0
def fit(self, X, y=None):
return self
def transform(self, X, y=None, **params):
X_new = numpy.empty_like(X)
x, y, z = X_new[:,0], X_new[:,1], X_new[:,2]
RA, Dec, D = X[:,0], X[:,1], X[:,2]
delta_RA = RA - self.RA_0
x[:] = -D * sin(delta_RA) * cos(Dec)
y[:] = D * (sin(Dec) * cos(self.Dec_0) +
sin(self.Dec_0) * cos(delta_RA) * cos(Dec))
# line-of-sight component, measured from the reference distance D_0
z[:] = self.D_0 \
- D * (sin(Dec)*sin(self.Dec_0) + cos(Dec)*cos(self.Dec_0)*cos(delta_RA))
return X_new
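# Hedged usage sketch: angles in radians and distances in kpc are assumed
# units; the center coordinates below are illustrative, not catalog values.
# center = Equatorial2Cartesian(RA_0=1.39, Dec_0=-1.21, D_0=50.0)
# X = numpy.array([[1.40, -1.20, 49.5]])   # columns: RA, Dec, D
# xyz = center.fit(X).transform(X)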
def rotation_matrix_3d(angle, axis):
assert axis in range(3), 'Axis must be 0, 1, or 2'
T = numpy.empty((3, 3), dtype=float)
# find the index of the -sin(angle) term
# this formula is the polynomial which passes through all of the pairs
# (axis, index)
i = axis**2 - 4*axis + 5
# write the sin terms first; for axis == 1 their stride overlaps the
# diagonal, so the cos terms must be written afterwards to win
T.flat[i::3-1] = sin(angle)
T.flat[::3+1] = cos(angle)
# negate the -sin(angle) term, as it is currently just sin(angle)
T.flat[i] *= -1
T[axis,:] = 0
T[:,axis] = 0
T[axis,axis] = 1
return T
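# Sanity check of the construction above: a quarter turn about the z axis
# (axis == 2) should carry the x unit vector onto the y unit vector.
# from numpy import pi, allclose
# assert allclose(rotation_matrix_3d(pi / 2, 2).dot([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])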
class Rotation3D():
def __init__(self, angle, axis):
self.axis = axis
self.angle = angle
self.rotation_matrix = rotation_matrix_3d(angle, axis)
def fit(self, X, y=None):
return self
def transform(self, X, y=None, **params):
return self.rotation_matrix.dot(X)
|
mit
| -4,956,154,805,142,992,000
| 24.701493
| 74
| 0.522648
| false
| 2.903879
| false
| false
| false
|
lindemann09/pyForceDAQ
|
forceDAQ/data_handling/read_force_data.py
|
1
|
2147
|
"""
Functions to read force and event data
"""
__author__ = 'Oliver Lindemann'
import os
import sys
import gzip
from collections import OrderedDict
import numpy as np
TAG_COMMENTS = "#"
TAG_UDPDATA = TAG_COMMENTS + "UDP"
TAG_DAQEVENTS = TAG_COMMENTS + "T"
def _csv(line):
return list(map(lambda x: x.strip(), line.split(",")))
def DataFrameDict(data, varnames):
"""data frame: Dict of numpy arrays
does not require Pandas, but can be easily converted to pandas dataframe
via pandas.DataFrame(data_frame_dict)
"""
rtn = OrderedDict()
for v in varnames:
rtn[v] = []
for row in data:
for v, d in zip(varnames, row):
rtn[v].append(d)
return rtn
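# Example of the conversion mentioned in the docstring:
# import pandas
# df = pandas.DataFrame(DataFrameDict([[0.0, 'start'], [1.5, 'stop']], ["time", "value"]))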
def data_frame_to_text(data_frame):
rtn = ",".join(data_frame.keys())
rtn += "\n"
for x in np.array(list(data_frame.values())).T:
rtn += ",".join(x) + "\n"
return rtn
def read_raw_data(path):
"""reading trigger and udp data
Returns: data, udp_event, daq_events and comments
data, udp_event, daq_events: DataFrameDict
comments: text string
"""
daq_events = []
udp_events = []
comments = ""
data = []
varnames = None
app_dir = os.path.split(sys.argv[0])[0]
path = os.path.abspath(os.path.join(app_dir, path))
if path.endswith("gz"):
fl = gzip.open(path, "rt")
else:
fl = open(path, "rt")
for ln in fl:
if ln.startswith(TAG_COMMENTS):
comments += ln
if ln.startswith(TAG_UDPDATA + ","):
udp_events.append(_csv(ln[len(TAG_UDPDATA) + 1:]))
elif ln.startswith(TAG_DAQEVENTS):
daq_events.append(_csv(ln[len(TAG_DAQEVENTS) + 1:]))
else:
# data
if varnames is None:
# first row contains varnames
varnames = _csv(ln)
else:
data.append(_csv(ln))
fl.close()
return (DataFrameDict(data, varnames),
DataFrameDict(udp_events, ["time", "value"]),
DataFrameDict(daq_events, ["time", "value"]),
comments)
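# Hedged usage sketch; 'recording.csv.gz' is a placeholder file name and the
# available data columns depend on the recording itself:
# data, udp_events, daq_events, comments = read_raw_data('recording.csv.gz')
# print(udp_events["time"], udp_events["value"])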
|
mit
| -4,905,626,410,306,586,000
| 23.123596
| 76
| 0.56218
| false
| 3.451768
| false
| false
| false
|
cemarchi/biosphere
|
Src/BioAnalyzer/Analysis/GenePrioritization/Steps/DataIntegration/IntermediateRepresentation/Transformers/MicroRnaToGeneTransformer.py
|
1
|
4546
|
import math
import statistics
from itertools import groupby
from random import randint
from typing import Dict, Tuple, Counter
import pandas as pd
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Generators import \
IntermediateRepresentationGeneratorBase
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Transformers.SampleTransformerBase import \
SampleTransformerBase
from Src.BioDataManagement.CrossCutting.DTOs.ExpressionLevelStatusDto import ExpressionLevelStatusDto
class MicroRnaToGeneTransformer(SampleTransformerBase):
"""
"""
def __init__(self,
intermediateRepresentationGenerator: IntermediateRepresentationGeneratorBase,
get_global_diff_values_action,
get_mirna_gene_target_action):
super().__init__(intermediateRepresentationGenerator)
self.__get_mirna_gene_target_action = get_mirna_gene_target_action
self.__get_global_diff_values_action = get_global_diff_values_action
def transform(self, from_sample_matrix: pd.DataFrame, is_highly_significant: bool) -> Tuple[pd.DataFrame, Dict[int, ExpressionLevelStatusDto]]:
mirna_gene_targets = {mirna.lower(): g for mirna, g in
self.__get_mirna_gene_targets(from_sample_matrix.columns.tolist()).items()}
mirna_samples = self.__get_mirna_samples(from_sample_matrix, mirna_gene_targets)
id_entrez_list = list(set([id_entrez for mirna_symbol, id_entrez_list in mirna_gene_targets.items()
for id_entrez in id_entrez_list]))
measure_matrix = dict([(g, []) for g in id_entrez_list])
key_func = lambda gene: gene[0]
for patient_id, exp_values in mirna_samples.items():
gene_values = [(id_entrez,
exp_value) for mirna_symbol, exp_value in exp_values.items()
for id_entrez in mirna_gene_targets[mirna_symbol]]
gene_values = sorted(gene_values, key=key_func)
for id_entrez, measures in groupby(gene_values, key_func):
measures = [measure for id_entrez, measure in list(measures) if not math.isnan(measure)]
measure_matrix[id_entrez].append(float('NaN') if not measures else statistics.mean(measures))
gene_matrix = pd.DataFrame.from_dict(measure_matrix).dropna(axis=1,how='all')
gene_matrix = self.intermediateRepresentationGenerator.generate(gene_matrix).dropna(axis=1,how='all')
return gene_matrix, \
self.__get_gene_status(mirna_gene_targets, gene_matrix.columns.tolist(), is_highly_significant)
def __get_mirna_gene_targets(self, mirnas):
gene_targets = {}
fe_target = self.__get_mirna_gene_target_action(mirnas)
gene_targets.update(dict([(t.microrna_symbol, list(set(gene_targets[t.microrna_symbol] + t.id_entrez_genes)))
if t.microrna_symbol in gene_targets
else (t.microrna_symbol, t.id_entrez_genes) for t in fe_target.result_list]))
return gene_targets
def __get_mirna_samples(self, from_sample_matrix, mirna_gene_targets):
from_sample_matrix = from_sample_matrix[list(mirna_gene_targets.keys()) + ['patient_id']]
from_sample_matrix.set_index("patient_id", drop=True, inplace=True)
return from_sample_matrix.to_dict(orient="index")
def __get_gene_status(self, mirna_gene_targets, genes, is_highly_significant):
diff_mirna = [diff for diff in self.__get_global_diff_values_action(is_highly_significant).result.values
if diff.element_id in mirna_gene_targets]
genes_status = [(g, diff.status) for diff in diff_mirna
for g in mirna_gene_targets[diff.element_id] if g in genes]
key_func = lambda gene: gene[0]
genes_status = sorted(genes_status, key=key_func)
genes_status_dict = {}
for id_entrez, status in groupby(genes_status, key_func):
status = list(status)
status_counter = Counter(status)
status = [k for k, v in status_counter.most_common()]
len_status = len(status) - 1
genes_status_dict[id_entrez] = status[0] if len_status == 1 else status[randint(0, len_status)]
return dict([(entrez_id, status[1]) for entrez_id, status in genes_status_dict.items()])
|
bsd-3-clause
| 261,306,271,858,045,380
| 49.522222
| 147
| 0.653322
| false
| 3.625199
| false
| false
| false
|
aplanas/kmanga
|
kmanga/core/models.py
|
1
|
21424
|
import os.path
from django.conf import settings
from django.db import connection
from django.db import models
from django.db.models import Count
from django.db.models import F
from django.db.models import Q
from django.urls import reverse
from django.utils import timezone
class TimeStampedModel(models.Model):
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Source(TimeStampedModel):
name = models.CharField(max_length=200)
spider = models.CharField(max_length=80)
url = models.URLField(unique=True)
has_footer = models.BooleanField(default=False)
enabled = models.BooleanField(default=True)
def __str__(self):
return self.name
class SourceLanguage(TimeStampedModel):
GERMAN = 'DE'
ENGLISH = 'EN'
SPANISH = 'ES'
FRENCH = 'FR'
ITALIAN = 'IT'
RUSSIAN = 'RU'
PORTUGUESE = 'PT'
LANGUAGE_CHOICES = (
(ENGLISH, 'English'),
(SPANISH, 'Spanish'),
(GERMAN, 'German'),
(FRENCH, 'French'),
(ITALIAN, 'Italian'),
(RUSSIAN, 'Russian'),
(PORTUGUESE, 'Portuguese'),
)
language = models.CharField(max_length=2, choices=LANGUAGE_CHOICES)
source = models.ForeignKey(Source, on_delete=models.CASCADE)
def __str__(self):
return '%s (%s)' % (self.get_language_display(), self.language)
class ConsolidateGenre(TimeStampedModel):
name = models.CharField(max_length=200)
def __str__(self):
return self.name
class Genre(TimeStampedModel):
name = models.CharField(max_length=200)
source = models.ForeignKey(Source, on_delete=models.CASCADE)
# consolidategenre = models.ForeignKey(ConsolidateGenre,
# on_delete=models.CASCADE)
def __str__(self):
return self.name
class AdvRawQuerySet(models.query.RawQuerySet):
"""RawQuerySet subclass with advanced options."""
def __init__(self, raw_query, paged_query, count_query,
model=None, query=None, params=None,
translations=None, using=None, hints=None):
super(AdvRawQuerySet, self).__init__(raw_query, model=model,
query=query,
params=params,
translations=translations,
using=using, hints=hints)
self.raw_query = raw_query
self.paged_query = paged_query
self.count_query = count_query
def __getitem__(self, key):
if isinstance(key, slice):
start, stop = key.start, key.stop
else:
start, stop = key, key + 1
if self.params:
params = self.params + [stop-start, start]
else:
params = (stop-start, start)
return models.query.RawQuerySet(self.paged_query,
model=self.model,
params=params,
translations=self.translations,
using=self._db,
hints=self._hints)
def __len__(self):
cursor = connection.cursor()
cursor.execute(self.count_query, self.params)
return cursor.fetchone()[0]
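# Illustration of the contract above (the queries are hypothetical):
# qs = AdvRawQuerySet(raw_query, paged_query, count_query, model=Manga)
# page = qs[20:40]    # runs paged_query with LIMIT 20 OFFSET 20
# total = len(qs)     # runs count_query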
class MangaQuerySet(models.QuerySet):
def latests(self):
"""Return the lastest mangas with new/updated issues."""
# The correct annotation expression is the next one, but due
# to an error in Django ORM, this empression uses a full GROUP
# BY with the data fields. This produce a slow query.
#
# return self.annotate(
# models.Max('issue__modified')
# ).order_by('-issue__modified__max')
#
# Alternative (without deferreds)
#
# extra_query = '''
# SELECT MAX(core_issue.modified)
# FROM core_issue
# WHERE core_issue.manga_id = core_manga.id
# '''
# Manga.objects.extra({
# 'issue__modified__max': extra_query
# }).order_by('-issue__modified__max')
raw_query = '''
SELECT core_manga.id,
MAX(core_issue.modified) AS issue__modified__max
FROM core_manga
LEFT OUTER JOIN core_issue
ON (core_manga.id = core_issue.manga_id)
GROUP BY core_manga.id
ORDER BY issue__modified__max DESC NULLS LAST,
core_manga.name ASC,
core_manga.url ASC;
'''
paged_query = '''
SELECT core_manga.id,
MAX(core_issue.modified) AS issue__modified__max
FROM core_manga
LEFT OUTER JOIN core_issue
ON (core_manga.id = core_issue.manga_id)
GROUP BY core_manga.id
ORDER BY issue__modified__max DESC NULLS LAST,
core_manga.name ASC,
core_manga.url ASC
LIMIT %s
OFFSET %s;
'''
count_query = '''
SELECT COUNT(*)
FROM core_manga;
'''
return AdvRawQuerySet(raw_query=raw_query,
paged_query=paged_query,
count_query=count_query,
model=self.model,
using=self.db)
def _to_tsquery(self, q):
"""Convert a query to a PostgreSQL tsquery."""
# Remove special chars (except parens)
q = ''.join(c if c.isalnum() or c in '()' else ' ' for c in q)
# Separate parentheses from words
for token in ('(', ')'):
q = q.replace(token, ' %s ' % token)
# Parse the query
op = {
'and': '&',
'or': '|',
'not': '-',
'(': '(',
')': ')',
}
# Join operators
j = '&|'
# Operators that expect and join before
ops_j = '-('
tsquery = []
for token in q.split():
if token in op:
if tsquery and op[token] in ops_j and tsquery[-1] not in j:
tsquery.append(op['and'])
tsquery.append(op[token])
else:
if tsquery and tsquery[-1] not in (j + ops_j):
tsquery.append(op['and'])
tsquery.append('%s:*' % token)
# Add spaces between join operators
tsquery = [(t if t not in j else ' %s ' % t) for t in tsquery]
return ''.join(tsquery)
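# Worked example of the conversion implemented above:
# Manga.objects._to_tsquery('foo and (bar or baz)') -> 'foo:* & (bar:* | baz:*)'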
def is_valid(self, q):
"""Check is the query is a valid query."""
q = self._to_tsquery(q)
# Separate parentheses from words
for token in ('(', ')'):
q = q.replace(token, ' %s ' % token)
s = []
for token in q.split():
if token == '(':
s.append(token)
elif token == ')':
try:
t = s.pop()
except IndexError:
return False
if t != '(':
return False
return not len(s)
def search(self, q):
q = self._to_tsquery(q)
raw_query = '''
SELECT core_manga.*
FROM (
SELECT id
FROM core_manga_fts_view,
to_tsquery(%s) AS q
WHERE document @@ q
ORDER BY ts_rank(document, q) DESC,
name ASC,
url ASC
) AS ids
INNER JOIN core_manga ON core_manga.id = ids.id;
'''
paged_query = '''
SELECT core_manga.*
FROM (
SELECT id
FROM core_manga_fts_view,
to_tsquery(%s) AS q
WHERE document @@ q
ORDER BY ts_rank(document, q) DESC,
name ASC,
url ASC
LIMIT %s
OFFSET %s
) AS ids
INNER JOIN core_manga ON core_manga.id = ids.id;
'''
count_query = '''
SELECT COUNT(*)
FROM core_manga_fts_view
WHERE document @@ to_tsquery(%s);
'''
return AdvRawQuerySet(raw_query=raw_query,
paged_query=paged_query,
count_query=count_query,
model=self.model,
params=[q],
using=self.db)
def refresh(self):
cursor = connection.cursor()
cursor.execute('REFRESH MATERIALIZED VIEW core_manga_fts_view;')
def _cover_path(instance, filename):
return os.path.join(instance.source.spider, filename)
class Manga(TimeStampedModel):
LEFT_TO_RIGHT = 'LR'
RIGHT_TO_LEFT = 'RL'
READING_DIRECTION = (
(LEFT_TO_RIGHT, 'Left-to-right'),
(RIGHT_TO_LEFT, 'Right-to-left'),
)
ONGOING = 'O'
COMPLETED = 'C'
STATUS = (
(ONGOING, 'Ongoing'),
(COMPLETED, 'Completed'),
)
ASC = 'ASC'
DESC = 'DESC'
RANK_ORDER = (
(ASC, 'Ascending'),
(DESC, 'Descending'),
)
name = models.CharField(max_length=200, db_index=True)
# slug = models.SlugField(max_length=200)
# release = models.DateField()
author = models.CharField(max_length=200)
artist = models.CharField(max_length=200)
reading_direction = models.CharField(max_length=2,
choices=READING_DIRECTION,
default=RIGHT_TO_LEFT)
status = models.CharField(max_length=1,
choices=STATUS,
default=ONGOING)
genres = models.ManyToManyField(Genre)
rank = models.FloatField(null=True, blank=True)
rank_order = models.CharField(max_length=4,
choices=RANK_ORDER,
default=ASC)
description = models.TextField()
cover = models.ImageField(upload_to=_cover_path)
url = models.URLField(unique=True, db_index=True)
source = models.ForeignKey(Source, on_delete=models.CASCADE)
objects = MangaQuerySet.as_manager()
def __str__(self):
return self.name
def subscribe(self, user, language=None, issues_per_day=4, paused=False):
"""Subscribe an User to the current manga."""
language = language if language else user.userprofile.language
obj, created = Subscription.all_objects.update_or_create(
manga=self,
user=user,
defaults={
'language': language,
'issues_per_day': issues_per_day,
'paused': paused,
'deleted': False,
})
return obj
def is_subscribed(self, user):
"""Check if an user is subscribed to this manga."""
return self.subscription(user).exists()
def subscription(self, user):
"""Return the users' subscription of this manga."""
return self.subscription_set.filter(user=user)
def languages(self):
"""Return the number of issues per language."""
return self.issue_set\
.values('language')\
.order_by('language')\
.annotate(Count('language'))
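# Hedged usage sketch ('manga' and 'user' are hypothetical instances):
# sub = manga.subscribe(user, language='EN', issues_per_day=2)
# manga.is_subscribed(user)   # -> True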
class AltName(TimeStampedModel):
name = models.CharField(max_length=200)
manga = models.ForeignKey(Manga, on_delete=models.CASCADE)
def __str__(self):
return self.name
class Issue(TimeStampedModel):
name = models.CharField(max_length=200)
number = models.CharField(max_length=10)
order = models.IntegerField()
language = models.CharField(max_length=2,
choices=SourceLanguage.LANGUAGE_CHOICES)
release = models.DateField()
url = models.URLField(unique=True, max_length=255)
manga = models.ForeignKey(Manga, on_delete=models.CASCADE)
class Meta:
ordering = ('order', 'name')
def __str__(self):
return self.name
def is_sent(self, user):
"""Check if an user has received this issue."""
return self.result(user, status=Result.SENT).exists()
def create_result_if_needed(self, user, status, set_send_date=True):
"""Create `Result` if is new with a status."""
defaults = {'status': status}
if set_send_date:
defaults['send_date'] = timezone.now()
subscription = Subscription.objects.get(
manga=self.manga, user=user)
result, _ = Result.objects.update_or_create(
issue=self,
subscription=subscription,
defaults=defaults)
return result
def result(self, user, status=None):
"""Return the Result for an user for this issue."""
# XXX TODO - Avoid filtering by subscription__deleted using
# the Subscription manager.
query = self.result_set.filter(
subscription__user=user,
subscription__deleted=False)
if status:
query = query.filter(status=status)
return query
def retry_if_failed(self, user):
"""Increment the retry field of `Result` if status is FAIL."""
self.result(user, status=Result.FAILED).update(retry=F('retry') + 1)
class SubscriptionQuerySet(models.QuerySet):
def latests(self, user):
"""Return the latests subscriptions with changes in Result."""
# See the notes from `MangaQuerySet.latests()`
raw_query = '''
SELECT core_subscription.id,
MAX(core_result.modified) AS result__modified__max
FROM core_subscription
LEFT OUTER JOIN core_result
ON (core_subscription.id = core_result.subscription_id)
WHERE core_subscription.deleted = false
AND core_subscription.user_id = %s
GROUP BY core_subscription.id
ORDER BY result__modified__max DESC NULLS LAST,
core_subscription.id ASC;
'''
paged_query = '''
SELECT core_subscription.id,
MAX(core_result.modified) AS result__modified__max
FROM core_subscription
LEFT OUTER JOIN core_result
ON (core_subscription.id = core_result.subscription_id)
WHERE core_subscription.deleted = false
AND core_subscription.user_id = %s
GROUP BY core_subscription.id
ORDER BY result__modified__max DESC NULLS LAST,
core_subscription.id ASC
LIMIT %s
OFFSET %s;
'''
count_query = '''
SELECT COUNT(*)
FROM core_subscription
WHERE core_subscription.deleted = false
AND core_subscription.user_id = %s;
'''
return AdvRawQuerySet(raw_query=raw_query,
paged_query=paged_query,
count_query=count_query,
model=self.model,
params=[user.id],
using=self.db)
class SubscriptionManager(models.Manager):
def get_queryset(self):
"""Exclude deleted subscriptions."""
return super(SubscriptionManager,
self).get_queryset().exclude(deleted=True)
class SubscriptionActiveManager(models.Manager):
def get_queryset(self):
"""Exclude paused and deleted subscriptions."""
return super(SubscriptionActiveManager,
self).get_queryset().exclude(
Q(paused=True) | Q(deleted=True))
class Subscription(TimeStampedModel):
# Number of retries before giving up in a FAILED result
RETRY = 3
manga = models.ForeignKey(Manga, on_delete=models.CASCADE)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
language = models.CharField(max_length=2,
choices=SourceLanguage.LANGUAGE_CHOICES)
issues_per_day = models.IntegerField(default=4)
paused = models.BooleanField(default=False)
deleted = models.BooleanField(default=False)
objects = SubscriptionManager.from_queryset(SubscriptionQuerySet)()
actives = SubscriptionActiveManager.from_queryset(SubscriptionQuerySet)()
all_objects = models.Manager()
class Meta:
unique_together = ('manga', 'user')
def __str__(self):
return '%s (%d per day)' % (self.manga, self.issues_per_day)
def issues(self):
"""Return the list of issues in the language of the Subscription."""
return self.manga.issue_set.filter(language=self.language)
def issues_to_send(self, retry=None):
"""Return the list of issues to send, ordered by number."""
if not retry:
retry = Subscription.RETRY
already_sent = Result.objects.processed_last_24hs(self.user,
subscription=self)
remains = max(0, self.issues_per_day-already_sent)
return self.manga.issue_set.filter(
language=self.language
).exclude(
pk__in=self.result_set.filter(
Q(status__in=(Result.PROCESSING, Result.SENT)) |
(Q(status=Result.FAILED) & Q(retry__gt=retry))
).values('issue__id')
).order_by('order')[:remains]
def issues_to_retry(self, retry=None):
"""Return the list of issues to retry, ordered by number."""
# This method doesn't take care about the limits of the user
if not retry:
retry = Subscription.RETRY
return self.manga.issue_set.filter(
language=self.language,
result__subscription=self,
result__status=Result.FAILED,
result__retry__lte=retry
).order_by('order')
def add_sent(self, issue):
"""Add or update a Result to a Subscription."""
# XXX TODO - add_sent is deprecated, use
# Issue.create_result_if_needed, or extend the features inside
# Subscription.
return Result.objects.update_or_create(
issue=issue,
subscription=self,
defaults={
'status': Result.SENT,
'send_date': timezone.now(),
})
def latest_issues(self):
"""Return the list of issues ordered by modified result."""
return self.issues().filter(
result__subscription=self
).annotate(
models.Max('result__modified')
).order_by('-result__modified')
class ResultQuerySet(models.QuerySet):
TIME_DELTA = 2
def latests(self, status=None):
query = self
if status:
query = query.filter(status=status)
return query.order_by('-modified')
def _processed_last_24hs(self, user, subscription=None):
"""Return the list of `Result` processed during the last 24 hours."""
today = timezone.now()
yesterday = today - timezone.timedelta(days=1)
# XXX TODO - Objects are created / modified always after time
# T. If the send process is slow, the error margin can be
# bigger than the one used here.
yesterday += timezone.timedelta(hours=ResultQuerySet.TIME_DELTA)
query = self.filter(
subscription__user=user,
send_date__range=[yesterday, today],
)
if subscription:
query = query.filter(subscription=subscription)
return query
def processed_last_24hs(self, user, subscription=None):
"""Return the number of `Result` processed during the last 24 hours."""
return self._processed_last_24hs(user, subscription).count()
def pending(self):
return self.latests(status=Result.PENDING)
def processing(self):
return self.latests(status=Result.PROCESSING)
def sent(self):
return self.latests(status=Result.SENT)
def failed(self):
return self.latests(status=Result.FAILED)
class Result(TimeStampedModel):
PENDING = 'PE'
PROCESSING = 'PR'
SENT = 'SE'
FAILED = 'FA'
STATUS_CHOICES = (
(PENDING, 'Pending'),
(PROCESSING, 'Processing'),
(SENT, 'Sent'),
(FAILED, 'Failed'),
)
issue = models.ForeignKey(Issue, on_delete=models.CASCADE)
subscription = models.ForeignKey(Subscription, on_delete=models.CASCADE)
status = models.CharField(max_length=2, choices=STATUS_CHOICES,
default=PENDING)
missing_pages = models.IntegerField(default=0)
send_date = models.DateTimeField(null=True, blank=True)
retry = models.IntegerField(default=0)
objects = ResultQuerySet.as_manager()
class Meta:
unique_together = ('issue', 'subscription')
def __str__(self):
return '%s (%s)' % (self.issue, self.get_status_display())
def get_absolute_url(self):
return reverse('result-detail', kwargs={'pk': self.pk})
def set_status(self, status):
self.status = status
# If the result is marked as FAILED, unset the `send_date`.
# In this way, if the result is moved to PENDING is not
# counted as SENT. Also if is not moved, the user can have
# one more issue for this day.
if status == Result.FAILED:
self.send_date = None
self.save()
def is_pending(self):
return self.status == Result.PENDING
def is_processing(self):
return self.status == Result.PROCESSING
def is_sent(self):
return self.status == Result.SENT
def is_failed(self):
return self.status == Result.FAILED
|
gpl-3.0
| -7,570,314,442,123,775,000
| 32.632653
| 80
| 0.56334
| false
| 4.111303
| true
| false
| false
|
HingeChat/HingeChat
|
src/hingechat/qt/qChatWidget.py
|
1
|
8829
|
import re
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QFontMetrics
from PyQt5.QtWidgets import QHBoxLayout
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QSplitter
from PyQt5.QtWidgets import QTextBrowser
from PyQt5.QtWidgets import QTextEdit
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QWidget
from src.hingechat.qt import qtUtils
from src.hinge.utils import *
class QChatWidget(QWidget):
def __init__(self, chat_window, nick, parent=None):
QWidget.__init__(self, parent)
self.chat_window = chat_window
self.nick = nick
self.disabled = False
self.cleared = False
self.url_regex = re.compile(URL_REGEX)
self.chat_log = QTextBrowser()
self.chat_log.setOpenExternalLinks(True)
self.chat_input = QTextEdit()
self.chat_input.textChanged.connect(self.chatInputTextChanged)
self.send_button = QPushButton("Send")
self.send_button.clicked.connect(self.sendMessage)
# Set the min height for the chat log and a matching fixed height for the send button
chat_input_font_metrics = QFontMetrics(self.chat_input.font())
self.chat_input.setMinimumHeight(chat_input_font_metrics.lineSpacing() * 3)
self.send_button.setFixedHeight(chat_input_font_metrics.lineSpacing() * 3)
hbox = QHBoxLayout()
hbox.addWidget(self.chat_input)
hbox.addWidget(self.send_button)
# Put the chat input and send button in a wrapper widget so they may be added to the splitter
chat_input_wrapper = QWidget()
chat_input_wrapper.setLayout(hbox)
chat_input_wrapper.setMinimumHeight(int(chat_input_font_metrics.lineSpacing() * 3.7))
# Put the chat log and chat input into a splitter so the user can resize them at will
splitter = QSplitter(Qt.Vertical)
splitter.addWidget(self.chat_log)
splitter.addWidget(chat_input_wrapper)
splitter.setSizes([int(parent.height()), 1])
hbox = QHBoxLayout()
hbox.addWidget(splitter)
self.setLayout(hbox)
self.typing_timer = QTimer()
self.typing_timer.setSingleShot(True)
self.typing_timer.timeout.connect(self.stoppedTyping)
def setRemoteNick(self, nick):
self.nick = nick
def chatInputTextChanged(self):
# Check if the text changed was the text box being cleared to avoid sending an invalid typing status
if self.cleared:
self.cleared = False
return
if str(self.chat_input.toPlainText())[-1:] == '\n':
self.sendMessage()
else:
# Start a timer to check for the user stopping typing
self.typing_timer.start(TYPING_TIMEOUT)
self.sendTypingStatus(TYPING_START)
def stoppedTyping(self):
self.typing_timer.stop()
if str(self.chat_input.toPlainText()) == '':
self.sendTypingStatus(TYPING_STOP_WITHOUT_TEXT)
else:
self.sendTypingStatus(TYPING_STOP_WITH_TEXT)
def sendMessage(self):
if self.disabled:
return
self.typing_timer.stop()
text = str(self.chat_input.toPlainText())[:-1]
# Don't send empty messages
if text == '':
return
# Convert URLs into clickable links
text = self.__linkify(text)
# Add the message to the message queue to be sent
self.chat_window.client.getSession(self.remote_id).sendChatMessage(text)
# Clear the chat input
self.cleared = True
self.chat_input.clear()
self.appendMessage(text, MSG_SENDER)
def sendTypingStatus(self, status):
self.chat_window.client.getSession(self.remote_id).sendTypingMessage(status)
def showNowChattingMessage(self, nick):
self.nick = nick
self.remote_id = self.chat_window.client.getClientId(self.nick)
self.appendMessage("You are now securely chatting with " + self.nick + " :)",
MSG_SERVICE, show_timestamp_and_nick=False)
self.appendMessage("It's a good idea to verify the communcation is secure by selecting "
"\"authenticate buddy\" in the options menu.", MSG_SERVICE, show_timestamp_and_nick=False)
self.addNickButton = QPushButton('Add', self)
self.addNickButton.setGeometry(584, 8, 31, 23)
self.addNickButton.clicked.connect(self.addNickScreen)
self.addNickButton.show()
def addUser(self, user):
nick = str(user.text()).lower()
# Validate the given nick
nickStatus = utils.isValidNick(nick)
if nickStatus == errors.VALID_NICK:
# TODO: Group chats
pass
elif nickStatus == errors.INVALID_NICK_CONTENT:
QMessageBox.warning(self, errors.TITLE_INVALID_NICK, errors.INVALID_NICK_CONTENT)
elif nickStatus == errors.INVALID_NICK_LENGTH:
QMessageBox.warning(self, errors.TITLE_INVALID_NICK, errors.INVALID_NICK_LENGTH)
elif nickStatus == errors.INVALID_EMPTY_NICK:
QMessageBox.warning(self, errors.TITLE_EMPTY_NICK, errors.EMPTY_NICK)
def addNickScreen(self):
self.chat_log.setEnabled(False)
self.chat_input.setEnabled(False)
self.send_button.setEnabled(False)
self.addNickButton.hide()
self.addUserText = QLabel("Enter a username to add a user to the group chat.", self)
self.addUserText.setGeometry(200, 20, 300, 100)
self.addUserText.show()
self.user = QLineEdit(self)
self.user.setGeometry(200, 120, 240, 20)
self.user.returnPressed.connect(self.addUser)
self.user.show()
self.addUserButton = QPushButton('Add User', self)
self.addUserButton.setGeometry(250, 150, 150, 25)
self.addUserButton.clicked.connect(lambda: self.addUser(self.user))
self.addUserButton.show()
self.cancel = QPushButton('Cancel', self)
self.cancel.setGeometry(298, 210, 51, 23)
self.cancel.clicked.connect(lambda: self.chat_log.setEnabled(True))
self.cancel.clicked.connect(lambda: self.chat_input.setEnabled(True))
self.cancel.clicked.connect(lambda: self.send_button.setEnabled(True))
self.cancel.clicked.connect(self.addUserText.hide)
self.cancel.clicked.connect(self.user.hide)
self.cancel.clicked.connect(self.addUserButton.hide)
self.cancel.clicked.connect(self.addNickButton.show)
self.cancel.clicked.connect(self.cancel.hide)
self.cancel.show()
def appendMessage(self, message, source, show_timestamp_and_nick=True):
color = self.__getColor(source)
if show_timestamp_and_nick:
timestamp = '<font color="' + color + '">(' + getTimestamp() + ') <strong>' + \
(self.chat_window.client.nick if source == MSG_SENDER else self.nick) + \
':</strong></font> '
else:
timestamp = ''
# If the user has scrolled up (current value != maximum), do not move the scrollbar
# to the bottom after appending the message
shouldScroll = True
scrollbar = self.chat_log.verticalScrollBar()
if scrollbar.value() != scrollbar.maximum() and source != MSG_SENDER:
shouldScroll = False
self.chat_log.append(timestamp + message)
# Move the vertical scrollbar to the bottom of the chat log
if shouldScroll:
scrollbar.setValue(scrollbar.maximum())
def __linkify(self, text):
matches = self.url_regex.findall(text)
for match in matches:
text = text.replace(match[0], '<a href="%s">%s</a>' % (match[0], match[0]))
return text
def __getColor(self, source):
if source == MSG_SENDER:
if qtUtils.is_light_theme:
return '#0000CC'
else:
return '#6666FF'
elif source == MSG_RECEIVER:
if qtUtils.is_light_theme:
return '#CC0000'
else:
return '#CC3333'
else:
if qtUtils.is_light_theme:
return '#000000'
else:
return '#FFFFFF'
def disable(self):
self.disabled = True
self.chat_input.setReadOnly(True)
def enable(self):
self.disabled = False
self.chat_input.setReadOnly(False)
|
lgpl-3.0
| 7,082,114,969,052,042,000
| 36.220779
| 117
| 0.619209
| false
| 3.998641
| false
| false
| false
|
JiahuiZHONG/Internship_Thread
|
tests/scripts/thread-cert/Cert_5_6_08_ContextManagement.py
|
1
|
4303
|
#!/usr/bin/python
#
# Copyright (c) 2016, Nest Labs, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import pexpect
import time
import unittest
import node
LEADER = 1
ROUTER = 2
ED = 3
class Cert_5_6_8_ContextManagement(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,4):
self.nodes[i] = node.Node(i)
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[LEADER].set_context_reuse_delay(10)
self.nodes[ROUTER].set_panid(0xface)
self.nodes[ROUTER].set_mode('rsdn')
self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER].enable_whitelist()
self.nodes[ED].set_panid(0xface)
self.nodes[ED].set_mode('rsn')
self.nodes[ED].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED].enable_whitelist()
def tearDown(self):
for node in self.nodes.itervalues():
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
time.sleep(3)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[ED].start()
time.sleep(3)
self.assertEqual(self.nodes[ED].get_state(), 'child')
self.nodes[ROUTER].add_prefix('2001::/64', 'pvcrs')
self.nodes[ROUTER].register_netdata()
time.sleep(2)
addrs = self.nodes[LEADER].get_addrs()
for addr in addrs:
if addr[0:3] == '200':
self.nodes[ED].ping(addr)
self.nodes[ROUTER].remove_prefix('2001::/64')
self.nodes[ROUTER].register_netdata()
time.sleep(5)
addrs = self.nodes[LEADER].get_addrs()
for addr in addrs:
if addr[0:3] == '200':
self.nodes[ED].ping(addr)
self.nodes[ROUTER].add_prefix('2002::/64', 'pvcrs')
self.nodes[ROUTER].register_netdata()
time.sleep(5)
addrs = self.nodes[LEADER].get_addrs()
for addr in addrs:
if addr[0:3] == '200':
self.nodes[ED].ping(addr)
time.sleep(5)
self.nodes[ROUTER].add_prefix('2003::/64', 'pvcrs')
self.nodes[ROUTER].register_netdata()
time.sleep(5)
addrs = self.nodes[LEADER].get_addrs()
for addr in addrs:
if addr[0:3] == '200':
self.nodes[ED].ping(addr)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
| -5,238,427,176,629,124,000
| 35.159664
| 78
| 0.647223
| false
| 3.709483
| false
| false
| false
|
scherroman/mugen
|
mugen/location_utility.py
|
1
|
2143
|
from typing import List, Tuple
"""
Module for Location & Interval manipulation
"""
def intervals_from_locations(locations: List[float]) -> List[float]:
intervals = []
previous_location = None
for index, location in enumerate(locations):
if index == 0:
intervals.append(location)
else:
intervals.append(location - previous_location)
previous_location = location
return intervals
def locations_from_intervals(intervals: List[float]) -> List[float]:
locations = []
running_duration = 0
for index, interval in enumerate(intervals):
if index < len(intervals):
running_duration += interval
locations.append(running_duration)
return locations
def start_end_locations_from_locations(locations: List[float]) -> Tuple[List[float], List[float]]:
"""
Calculates the start and end times of each location
Ex) 5, 10, 15
start_times == 5, 10, 15
end_times == 10, 15, 15
Returns
-------
A tuple of start and end times
"""
start_locations = []
end_locations = []
for index, location in enumerate(locations):
start_time = location
if index == len(locations) - 1:
end_time = location
else:
end_time = locations[index + 1]
start_locations.append(start_time)
end_locations.append(end_time)
return start_locations, end_locations
def start_end_locations_from_intervals(intervals: List[float]) -> Tuple[List[float], List[float]]:
"""
Calculates the start and end times of each interval
Ex) 5, 10, 15
start_times == 0, 5, 10
end_times == 5, 10, 15
Returns
-------
A tuple of start and end times
"""
start_locations = []
end_locations = []
running_duration = 0
for index, duration in enumerate(intervals):
start_time = running_duration
end_time = start_time + duration
start_locations.append(start_time)
end_locations.append(end_time)
running_duration += duration
return start_locations, end_locations
|
mit
| 7,078,705,883,654,456,000
| 23.918605
| 98
| 0.615492
| false
| 4.268924
| false
| false
| false
|
adbrebs/spynet
|
models/max_pool_3d.py
|
1
|
3198
|
from theano import tensor
from theano.tensor.signal.downsample import DownsampleFactorMax
def max_pool_3d(input, ds, ignore_border=False):
"""
Takes as input a N-D tensor, where N >= 3. It downscales the input by
the specified factor, by keeping only the maximum value of non-overlapping
patches of size (ds[0],ds[1],ds[2]) (depth, height, width)
Arguments:
input (N-D theano tensor of input images): input images. Max pooling will be done over the 3 last dimensions.
ds (tuple of length 3): factor by which to downscale. (2,2,2) will halve the video in each dimension.
ignore_border (boolean): When True, (5,5,5) input with ds=(2,2,2)
will generate a (2,2,2) output. (3,3,3) otherwise.
"""
if input.ndim < 3:
raise NotImplementedError('max_pool_3d requires a dimension >= 3')
# extract nr dimensions
vid_dim = input.ndim
# max pool in two different steps, so we can use the 2d implementation of
# downsamplefactormax. First maxpool frames as usual.
# Then maxpool the depth dimension. Shift the depth dimension to the third
# position, so rows and cols are in the back
# extract dimensions
frame_shape = input.shape[-2:]
# count the number of "leading" dimensions, store as dmatrix
batch_size = tensor.prod(input.shape[:-2])
batch_size = tensor.shape_padright(batch_size,1)
# store as 4D tensor with shape: (batch_size,1,height,width)
new_shape = tensor.cast(tensor.join(0, batch_size,
tensor.as_tensor([1,]),
frame_shape), 'int32')
input_4D = tensor.reshape(input, new_shape, ndim=4)
# downsample mini-batch of videos in rows and cols
op = DownsampleFactorMax((ds[1],ds[2]), ignore_border)
output = op(input_4D)
# restore to original shape
outshape = tensor.join(0, input.shape[:-2], output.shape[-2:])
out = tensor.reshape(output, outshape, ndim=input.ndim)
# now maxpool depth
# output (depth, rows, cols), reshape so that depth is in the back
shufl = (list(range(vid_dim-3)) + [vid_dim-2]+[vid_dim-1]+[vid_dim-3])
input_depth = out.dimshuffle(shufl)
# reset dimensions
vid_shape = input_depth.shape[-2:]
# count the number of "leading" dimensions, store as dmatrix
batch_size = tensor.prod(input_depth.shape[:-2])
batch_size = tensor.shape_padright(batch_size,1)
# store as 4D tensor with shape: (batch_size,1,width,depth)
new_shape = tensor.cast(tensor.join(0, batch_size,
tensor.as_tensor([1,]),
vid_shape), 'int32')
input_4D_depth = tensor.reshape(input_depth, new_shape, ndim=4)
# downsample mini-batch of videos in depth
op = DownsampleFactorMax((1,ds[0]), ignore_border)
outdepth = op(input_4D_depth)
# output
# restore to original shape (xxx, rows, cols, depth)
outshape = tensor.join(0, input_depth.shape[:-2], outdepth.shape[-2:])
shufl = (list(range(vid_dim-3)) + [vid_dim-1]+[vid_dim-3]+[vid_dim-2])
return tensor.reshape(outdepth, outshape, ndim=input.ndim).dimshuffle(shufl)
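# Hedged usage sketch (evaluating requires compiling a Theano function):
# import numpy, theano
# x = tensor.TensorType('float64', (False,) * 5)()   # (batch, channel, depth, rows, cols)
# f = theano.function([x], max_pool_3d(x, ds=(2, 2, 2)))
# f(numpy.random.rand(1, 1, 4, 6, 6)).shape          # -> (1, 1, 2, 3, 3)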
|
bsd-2-clause
| 608,790,142,119,456,300
| 42.821918
| 117
| 0.641026
| false
| 3.468547
| false
| false
| false
|
mikedh/trimesh
|
trimesh/path/exchange/load.py
|
1
|
2622
|
import os
from .dxf import _dxf_loaders
from .svg_io import svg_to_path
from ..path import Path
from . import misc
from ... import util
def load_path(file_obj, file_type=None, **kwargs):
"""
Load a file to a Path object.
Parameters
-----------
file_obj : One of the following:
- Path, Path2D, or Path3D objects
- open file object (dxf or svg)
- file name (dxf or svg)
- shapely.geometry.Polygon
- shapely.geometry.MultiLineString
- dict with kwargs for Path constructor
- (n,2,(2|3)) float, line segments
file_type : str
Type of file; required if a file
object is passed.
Returns
---------
path : Path, Path2D, or Path3D object
Data as a native trimesh Path object
"""
if isinstance(file_obj, Path):
# we have been passed a Path object so
# do nothing and return it unchanged
return file_obj
elif util.is_file(file_obj):
# for open file objects use the registered loaders
kwargs.update(path_loaders[file_type](
file_obj, file_type=file_type))
elif util.is_string(file_obj):
# strings are treated as file paths
with open(file_obj, 'rb') as file_file_obj:
# get the file type from the extension
file_type = os.path.splitext(file_obj)[-1][1:].lower()
# call the loader
kwargs.update(path_loaders[file_type](
file_file_obj, file_type=file_type))
elif util.is_instance_named(file_obj, 'Polygon'):
# convert from shapely polygons to Path2D
kwargs.update(misc.polygon_to_path(file_obj))
elif util.is_instance_named(file_obj, 'MultiLineString'):
# convert from shapely LineStrings to Path2D
kwargs.update(misc.linestrings_to_path(file_obj))
elif isinstance(file_obj, dict):
# load as kwargs
from ...exchange.load import load_kwargs
return load_kwargs(file_obj)
elif util.is_sequence(file_obj):
# load as lines in space
kwargs.update(misc.lines_to_path(file_obj))
else:
raise ValueError('Not a supported object type!')
from ...exchange.load import load_kwargs
return load_kwargs(kwargs)
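# Hedged usage sketch; 'drawing.dxf' is a placeholder path.
# path = load_path('drawing.dxf')
# path_formats()   # -> ['svg', 'dxf'] (order may vary)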
def path_formats():
"""
Get a list of supported path formats.
Returns
------------
loaders : list of str
Extensions of loadable formats, ie:
['svg', 'dxf']
"""
return list(path_loaders.keys())
path_loaders = {'svg': svg_to_path}
path_loaders.update(_dxf_loaders)
|
mit
| 7,173,151,525,337,611,000
| 29.847059
| 66
| 0.615179
| false
| 3.78355
| false
| false
| false
|
jakdot/pyactr
|
tutorials/u7_simplecompilation.py
|
1
|
1254
|
"""
Testing a simple case of production compilation. Production compilation also interacts with utility learning, which the model below demonstrates.
"""
import warnings
import pyactr as actr
class Compilation1(object):
"""
Model testing compilation -- basic cases.
"""
def __init__(self, **kwargs):
actr.chunktype("state", "starting ending")
self.m = actr.ACTRModel(**kwargs)
self.m.goal.add(actr.makechunk(nameofchunk="start", typename="state", starting=1))
self.m.productionstring(name="one", string="""
=g>
isa state
starting =x
ending ~=x
==>
=g>
isa state
ending =x""", utility=2)
self.m.productionstring(name="two", string="""
=g>
isa state
starting =x
ending =x
==>
=g>
isa state
starting =x
ending 4""")
if __name__ == "__main__":
warnings.simplefilter("ignore")
mm = Compilation1(production_compilation=True, utility_learning=True)
model = mm.m
sim = model.simulation(realtime=True)
sim.run(0.5)
print(model.productions["one and two"])
|
gpl-3.0
| 8,944,572,004,527,267,000
| 24.591837
| 133
| 0.53429
| false
| 4.045161
| false
| false
| false
|
duyet-website/api.duyet.net
|
lib/docs/conf.py
|
1
|
8187
|
# -*- coding: utf-8 -*-
#
# Faker documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 11 11:25:48 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'faker.build_docs',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Faker'
copyright = u'2014, Daniele Faraglia'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7.5'
# The full version, including alpha/beta/rc tags.
release = '0.7.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Fakerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
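# A sketch of the same block with the common options filled in (values are
# illustrative, not part of this project's configuration):
#
# latex_elements = {
#     'papersize': 'a4paper',
#     'pointsize': '11pt',
#     'preamble': r'\usepackage{amsmath}',
# }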
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Faker.tex', u'Faker Documentation',
u'Daniele Faraglia', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'faker', u'Faker Documentation',
[u'Daniele Faraglia'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Faker', u'Faker Documentation',
u'Daniele Faraglia', 'Faker', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
mit
| 5,187,688,238,013,529,000
| 30.367816
| 79
| 0.706119
| false
| 3.704525
| true
| false
| false
|
coyotevz/nobix-app
|
nbs/models/misc.py
|
1
|
3865
|
# -*- coding: utf-8 -*-
from datetime import datetime
from sqlalchemy.ext.declarative import declared_attr
from nbs.models import db
class TimestampMixin(object):
created = db.Column(db.DateTime, default=datetime.now)
modified = db.Column(db.DateTime, default=datetime.now,
onupdate=datetime.now)
@staticmethod
def stamp_modified(mapper, connection, target):
if db.object_session(target).is_modified(target):
target.modified = datetime.now()
@classmethod
def __declare_last__(cls):
db.event.listen(cls, 'before_update', cls.stamp_modified)
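# A minimal usage sketch (the Note model is hypothetical, not part of this
# module): any model mixing in TimestampMixin gets 'created' set on insert
# and 'modified' refreshed by the 'before_update' listener above.
#
# class Note(TimestampMixin, db.Model):
#     id = db.Column(db.Integer, primary_key=True)
#     text = db.Column(db.Unicode)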
class RefEntityMixin(object):
@declared_attr
def entity_id(cls):
return db.Column('entity_id', db.Integer, db.ForeignKey('entity.id'),
nullable=False)
@declared_attr
def entity(cls):
name = cls.__name__.lower()
return db.relationship('Entity',
backref=db.backref(name, lazy='joined'),
lazy='joined')
class Address(RefEntityMixin, db.Model):
"""Stores addresses information"""
__tablename__ = 'address'
id = db.Column(db.Integer, primary_key=True)
address_type = db.Column(db.Unicode)
street = db.Column(db.Unicode(128), nullable=False)
city = db.Column(db.Unicode(64))
province = db.Column(db.Unicode(32), nullable=False)
postal_code = db.Column(db.Unicode(32))
    def __str__(self):
retval = self.street
if self.city:
retval += ", {}".format(self.city)
retval += ", {}".format(self.province)
if self.postal_code:
retval += " ({})".format(self.postal_code)
return retval
def __repr__(self):
return "<Address '{}' of '{}: {}'>".format(
str(self),
self.entity.entity_type,
self.entity.full_name
)
class Phone(RefEntityMixin, db.Model):
"""Model to store phone information"""
__tablename__ = 'phone'
id = db.Column(db.Integer, primary_key=True)
phone_type = db.Column(db.Unicode)
prefix = db.Column(db.Unicode(8))
number = db.Column(db.Unicode, nullable=False)
extension = db.Column(db.Unicode(5))
def __str__(self):
retval = self.phone_type+': ' if self.phone_type else ''
if self.prefix:
retval += "({})".format(self.prefix)
retval += self.number
if self.extension:
retval += " ext: {}".format(self.extension)
return retval
def __repr__(self):
return "<Phone '{}' of '{}: {}'>".format(
str(self),
self.entity.entity_type,
self.entity.full_name
)
class Email(RefEntityMixin, db.Model):
"""Model to store email information"""
__tablename__ = 'email'
id = db.Column(db.Integer, primary_key=True)
email_type = db.Column(db.Unicode(50))
email = db.Column(db.Unicode(50), nullable=False)
def __str__(self):
retval = self.email_type + ': ' if self.email_type else ''
retval += self.email
return retval
def __repr__(self):
return "<Email '{}' of '{}: {}'>".format(
str(self),
self.entity.entity_type,
self.entity.full_name
)
class ExtraField(RefEntityMixin, db.Model):
"""Model to store information of additional data"""
__tablename__ = 'extra_field'
id = db.Column(db.Integer, primary_key=True)
field_name = db.Column(db.Unicode(50), nullable=False)
field_value = db.Column(db.Unicode(50), nullable=False)
def __str__(self):
return self.field_name + ': ' + self.field_value
def __repr__(self):
return "<ExtraField '{}' of '{}: {}'>".format(
str(self),
self.entity.entity_type,
self.entity.full_name
)
|
mit
| 6,016,156,342,892,989,000
| 28.280303
| 77
| 0.574386
| false
| 3.789216
| false
| false
| false
|
pablodiguerero/asterisk.api
|
migrations/versions/4_add_physical_users_.py
|
1
|
1968
|
"""empty message
Revision ID: a374e36d0888
Revises: 4a6559da7594
Create Date: 2017-05-21 22:53:53.490856
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm import Session
from models import physical
from models import user
# revision identifiers, used by Alembic.
revision = '4_add_physical_users'
down_revision = '3_modify_user_fields'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('crm_physical',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('fam', sa.String(length=255), nullable=True),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('otch', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('crm_users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('login', sa.String(length=255), nullable=False),
sa.Column('password', sa.LargeBinary(), nullable=False),
sa.Column('access_level', sa.Integer(), server_default='10', nullable=False),
sa.Column('is_active', sa.Boolean(), server_default='f', nullable=False),
sa.ForeignKeyConstraint(['id'], ['crm_physical.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('login')
)
op.add_column('crm_users', sa.Column('sip_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'crm_users', 'asterisk_sip_users', ['sip_id'], ['id'], ondelete='SET NULL')
# ### end Alembic commands ###
bind = op.get_bind()
session = Session(bind)
phys = physical.Physical("Администратор")
phys.user = user.User("admin", "admin")
phys.user.access_level = user.User.BOSS
session.add(phys)
session.commit()
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('crm_users')
op.drop_table('crm_physical')
# ### end Alembic commands ###
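# Typical invocation with the Alembic CLI (assuming a configured alembic.ini):
#   alembic upgrade 4_add_physical_users
#   alembic downgrade 3_modify_user_fields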
|
mit
| 1,240,968,507,135,020,300
| 31.583333
| 107
| 0.670588
| false
| 3.347603
| false
| false
| false
|
wesm/ibis
|
dev/merge-pr.py
|
1
|
8184
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to
# Apache.
# usage: ./apache-pr-merge.py (see config env vars below)
#
# Lightly modified from version of this script in incubator-parquet-format
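#
# Example invocation (remote names are illustrative; see the env vars below):
#   PR_REMOTE_NAME=upstream PUSH_REMOTE_NAME=origin ./dev/merge-pr.py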
from __future__ import print_function
from requests.auth import HTTPBasicAuth
import requests
import os
import subprocess
import sys
import textwrap
from six.moves import input
import six
IBIS_HOME = os.path.abspath(__file__).rsplit("/", 2)[0]
PROJECT_NAME = 'ibis'
print("IBIS_HOME = " + IBIS_HOME)
# Remote name with the PR
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "upstream")
# Remote name where results pushed
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "upstream")
GITHUB_BASE = "https://github.com/cloudera/" + PROJECT_NAME + "/pull"
GITHUB_API_BASE = "https://api.github.com/repos/cloudera/" + PROJECT_NAME
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
os.chdir(IBIS_HOME)
auth_required = False
if auth_required:
GITHUB_USERNAME = os.environ['GITHUB_USER']
import getpass
GITHUB_PASSWORD = getpass.getpass('Enter github.com password for %s:'
% GITHUB_USERNAME)
def get_json_auth(url):
auth = HTTPBasicAuth(GITHUB_USERNAME, GITHUB_PASSWORD)
req = requests.get(url, auth=auth)
return req.json()
get_json = get_json_auth
else:
def get_json_no_auth(url):
req = requests.get(url)
return req.json()
get_json = get_json_no_auth
def fail(msg):
print(msg)
clean_up()
sys.exit(-1)
def run_cmd(cmd):
if isinstance(cmd, six.string_types):
cmd = cmd.split(' ')
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
# this avoids hiding the stdout / stderr of failed processes
print('Command failed: %s' % cmd)
print('With output:')
print('--------------')
print(e.output)
print('--------------')
raise e
if isinstance(output, six.binary_type):
output = output.decode('utf-8')
return output
def continue_maybe(prompt):
result = input("\n%s (y/n): " % prompt)
if result.lower() != "y":
fail("Okay, exiting")
original_head = run_cmd("git rev-parse HEAD")[:8]
def clean_up():
print("Restoring head pointer to %s" % original_head)
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
print("Deleting local branch %s" % branch)
run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref):
pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num,
target_ref.upper())
run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num,
pr_branch_name))
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref,
target_branch_name))
run_cmd("git checkout %s" % target_branch_name)
had_conflicts = False
try:
run_cmd(['git', 'merge', pr_branch_name, '--squash'])
except Exception as e:
msg = ("Error merging: %s\nWould you like to "
"manually fix-up this merge?" % e)
continue_maybe(msg)
msg = ("Okay, please fix any conflicts and 'git add' "
"conflicting files... Finished?")
continue_maybe(msg)
had_conflicts = True
commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%an <%ae>']).split("\n")
distinct_authors = sorted(set(commit_authors),
key=lambda x: commit_authors.count(x),
reverse=True)
primary_author = distinct_authors[0]
commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%h [%an] %s']).split("\n\n")
merge_message_flags = []
merge_message_flags += ["-m", title]
if body is not None:
merge_message_flags += ["-m", '\n'.join(textwrap.wrap(body))]
authors = "\n".join(["Author: %s" % a for a in distinct_authors])
merge_message_flags += ["-m", authors]
if had_conflicts:
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
message = ("This patch had conflicts when merged, "
"resolved by\nCommitter: %s <%s>" %
(committer_name, committer_email))
merge_message_flags += ["-m", message]
# The string "Closes #%s" string is required for GitHub to correctly close
# the PR
merge_message_flags += [
"-m",
"Closes #%s from %s and squashes the following commits:"
% (pr_num, pr_repo_desc)]
for c in commits:
merge_message_flags += ["-m", c]
run_cmd(['git', 'commit',
'--no-verify', # do not run commit hooks
'--author="%s"' % primary_author] +
merge_message_flags)
continue_maybe("Merge complete (local ref %s). Push to %s?" % (
target_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name,
target_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
clean_up()
print("Pull request #%s merged!" % pr_num)
print("Merge hash: %s" % merge_hash)
return merge_hash
branches = get_json("%s/branches" % GITHUB_API_BASE)
branch_names = filter(lambda x: x.startswith("branch-"),
[x['name'] for x in branches])
pr_num = input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
title = pr["title"]
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
if pr["merged"] is True:
print("Pull request {0} has already been merged, assuming "
"you want to backport".format(pr_num))
merge_commit_desc = run_cmd([
'git', 'log', '--merges', '--first-parent',
'--grep=pull request #%s' % pr_num, '--oneline']).split("\n")[0]
if merge_commit_desc == "":
fail("Couldn't find any merge commit for #{0}"
", you may need to update HEAD.".format(pr_num))
merge_hash = merge_commit_desc[:7]
message = merge_commit_desc[8:]
print("Found: %s" % message)
sys.exit(0)
if not bool(pr["mergeable"]):
msg = ("Pull request {0} is not mergeable in its current form.\n"
"Continue? (experts only!)".format(pr_num))
continue_maybe(msg)
print("\n=== Pull Request #%s ===" % pr_num)
print("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref)
|
apache-2.0
| -6,758,902,495,747,892,000
| 31.86747
| 78
| 0.601417
| false
| 3.487005
| false
| false
| false
|
vinoth3v/In
|
In/core/valuator.py
|
1
|
7873
|
import re
from In.core.object_meta import ObjectMetaBase
class ValuatorContainer(dict):
def __missing__(self, key):
vcls = IN.register.get_class(key, 'Valuator')
obj = vcls()
self[key] = obj
return obj
class ValuatorEngine:
	'''Valuator class that validates values based on validation rules.
Instance available as IN.valuator
'''
# dict of all Valuator instances
valuators = ValuatorContainer()
def validate(self, value, rule): # rule is ['type', args] or [[], [], []]
'''
#TODO: allow per false error message
rule = [
'And', [
['Length', '>', 6, 'The value length should be greater than 6.'],
['Not', [['Num']],
['Or', [
['Email', 'Invalid email address.'],
['Domain'],
['Url', 'Invalid Url.'],
]],
]],
]
'''
if not rule: # empty list
return [True]
try:
firstitem = rule[0]
item_type = type(firstitem)
if item_type is str: # ['type', args]
args = rule[1:]
result = self.valuators[firstitem].validate(value, *args)
if not result[0]:
#return [False, args[-1]] # last item is error message
return result
elif item_type is list: # [[], [], []]
for subrule in rule:
result = self.validate(value, subrule) # recursive
if not result[0]:
return result
except Exception as e:
IN.logger.debug()
return [False, str(e)]
return [True]
	def __getattr__(self, key):
		valuator = self.valuators[key]
		setattr(self, key, valuator) # cache under the real name
		return valuator
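# A minimal usage sketch (assumes IN.valuator holds a ValuatorEngine instance
# and the rule classes below are registered; a top-level list of sub-rules is
# an implicit AND):
#
#   rule = [
#       ['NotEmpty', 'Value is required.'],
#       ['Email', 'Invalid email address.'],
#   ]
#   result = IN.valuator.validate('user@example.com', rule)
#   if not result[0]:
#       print(result[1])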
class ValuatorMeta(ObjectMetaBase):
__class_type_base_name__ = 'ValuatorBase'
__class_type_name__ = 'Valuator'
class ValuatorBase(dict, metaclass = ValuatorMeta):
	'''Base class of all IN valuators.
	'''
__allowed_children__ = None
__default_child__ = None
ops = {
'=' : lambda l, al, ml: l == al,
'==' : lambda l, al, ml: l == al,
'!=' : lambda l, al, ml: l != al,
'>' : lambda l, al, ml: l > al,
'<' : lambda l, al, ml: l < al,
'>=' : lambda l, al, ml: l >= al,
'<=' : lambda l, al, ml: l <= al,
'<>' : lambda l, al, ml: al < l > ml,
'><' : lambda l, al, ml: al > l < ml,
}
def validate(self, value):
'''return value should be a list like [False, 'Error message.'] or [True]
'''
return [True]
@IN.register('Valuator', type = 'Valuator')
class Valuator(ValuatorBase):
	'''Default valuator; base class of all concrete IN valuators.
	'''
pass
class And(Valuator):
	def validate(self, value, rules, message = ''):
		# a list of sub-rules is already ANDed by ValuatorEngine.validate
		return IN.valuator.validate(value, rules)
class Or(Valuator):
	def validate(self, value, rules, message = ''):
		results = [IN.valuator.validate(value, rule)[0] for rule in rules]
		return [True] if any(results) else [False, message]
class Not(Valuator):
def validate(self, value, rule, message = ''):
'''not validator'''
result = IN.valuator.validate(value, rule[0])
not_result = not result[0]
return [not_result, message]
class Empty(Valuator):
def validate(self, value, message = ''):
		# returning the value itself would make it evaluate again
return [False, message] if value else [True]
class NotEmpty(Valuator):
def validate(self, value, message = ''):
		# returning the value itself would make it evaluate again
return [False, message] if not value else [True]
class Length(Valuator):
def validate(self, value, length = 0, op = '=', mlength = 0, message = ''):
try:
# does multiple ifs are good?
result = self.ops[op](len(value), length, mlength)
result = [result or False, message]
return result
except KeyError:
IN.logger.debug()
return [False, message] # always false
class Equal(Valuator):
def validate(self, value, tvalue, op = '=', mvalue = 0, message = ''):
try:
# does multiple ifs are good?
result = self.ops[op](value, tvalue, mvalue)
result = [result or False, message]
return result
except KeyError:
IN.logger.debug()
return [False, message] # always false
class Regx(Valuator):
	'''Valuator rule class that uses regex'''
	re_compiled = {} # cache: we don't want to compile the same pattern again
def get_regx(self, regx):
try:
return self.re_compiled[regx]
except KeyError:
self.re_compiled[regx] = re.compile(regx)
return self.re_compiled[regx]
def validate(self, value, regx, message = ''):
result = self.get_regx(regx).match(value)
return [result, message]
class Domain(Regx):
regex_host = r'(?:(?:[a-zA-Z0-9][a-zA-Z0-9\-]*)?[a-zA-Z0-9])'
def validate(self, domain, message = ''):
false_message = [False, message]
dlen = len(domain)
if dlen < 4 or dlen > 255 or domain.endswith('.') or '.' not in domain:
return false_message
try:
domain = domain.encode('idna').decode('ascii')
except Exception:
return false_message
try:
domain.encode('ascii').decode('idna')
except Exception:
return false_message
reg = self.regex_host + r'(?:\.' + self.regex_host + r')*'
m = re.match(reg + "$", domain)
if not m:
return false_message
return [True]
class Email(Regx):
regex = re.compile(r'^[A-Za-z0-9\.\+_-]')
atext = r'a-zA-Z0-9_\.\-' # !#\$%&\'\*\+/=\?\^`\{\|\}~
atext_utf8 = atext + r"\u0080-\U0010FFFF"
regex_local = re.compile(''.join(('[', atext, ']+(?:\\.[', atext, ']+)*$')))
regex_local_utf8 = re.compile(''.join(('[', atext_utf8, ']+(?:\\.[', atext_utf8, ']+)*$')))
def validate(self, value, message = ''):
parts = value.split('@')
if len(parts) != 2:
return [False, message]
local = self.validate_local(parts[0])
if not local:
return [False, message]
# check domain part
domain_result = IN.valuator.validate(parts[1], ['Domain', message])
if not domain_result[0]:
return domain_result
return [True] # valid
def validate_local(self, local):
# check nabar name part
if not local or len(local) > 64 or '..' in local:
return False
m = re.match(self.regex_local, local) # ASCII
if m: # True
return True
else:
# unicode
m = re.match(self.regex_local_utf8, local)
if m:
return True
else:
return False
class Url(Regx):
def validate(self, value, message = ''):
		return [True]
class Alpha(Valuator):
def validate(self, value, message = ''):
return [str.isalpha(value), message]
class AlphaNum(Valuator):
def validate(self, value, message = ''):
return [str.isalnum(value), message]
class Digit(Valuator):
def validate(self, value, message = ''):
return [str.isdigit(value), message]
class Decimal(Valuator):
def validate(self, value, message = ''):
return [str.isdecimal(value), message]
class Lower(Valuator):
def validate(self, value, message = ''):
return [str.islower(value), message]
class Upper(Valuator):
def validate(self, value, message = ''):
return [str.isupper(value), message]
class Numeric(Valuator):
def validate(self, value, message = ''):
return [str.isnumeric(value), message]
class Space(Valuator):
	'''Checks whether the value contains only whitespace (non-printing) characters'''
def validate(self, value, message = ''):
return [str.isspace(value), message]
class Startswith(Valuator):
def validate(self, value, start, message = ''):
return [str(value).startswith(start), message]
class Endswith(Valuator):
	def validate(self, value, suffix, message = ''):
		return [str(value).endswith(suffix), message]
class In(Valuator):
def validate(self, value, itr, message = ''):
return [value in itr, message]
class INPath(Valuator):
'''Check whether this string is a valid IN route.'''
def validate(self, value, message = ''):
		return [True]
class NabarRole(Valuator):
'''Check whether nabar has this role.'''
def validate(self, value, message = ''):
		return [True]
class NabarAccess(Valuator):
'''Check whether nabar has this access permissions.'''
def validate(self, value):
		return [True]
class Callback(Valuator):
'''call the Callback to valuate.'''
def validate(self, value, message = ''):
		return [True]
#@IN.hook
#def __In_app_init__(app):
### set the valuator
#IN.valuator = ValuatorEngine()
|
apache-2.0
| 3,151,906,794,283,461,600
| 23.396774
| 92
| 0.61133
| false
| 3.104495
| false
| false
| false
|
plotly/plotly.py
|
packages/python/plotly/plotly/graph_objs/heatmapgl/legendgrouptitle/_font.py
|
1
|
8487
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "heatmapgl.legendgrouptitle"
_path_str = "heatmapgl.legendgrouptitle.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
        it is available on the system on which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.heatmapgl.lege
ndgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.heatmapgl.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmapgl.legendgrouptitle.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
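# A minimal usage sketch (values are illustrative; the import path mirrors
# this module's location in the package):
#
#   from plotly.graph_objs.heatmapgl.legendgrouptitle import Font
#   font = Font(color="black", family="Arial", size=12)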
|
mit
| 5,632,017,995,005,265,000
| 36.387665
| 82
| 0.559797
| false
| 4.001414
| false
| false
| false
|
danhooper/sandbox
|
pinball/attract.py
|
1
|
2386
|
from procgame import *
class Mode(game.Mode):
def __init__(self, game):
super(Mode, self).__init__(game, 1)
highscore_categories = []
cat = highscore.HighScoreCategory()
cat.game_data_key = "HighScores"
cat.titles = [
"Grand Champion",
"High Score 1",
"High Score 2",
"High Score 3",
"High Score 4"
]
highscore_categories.append(cat)
for category in highscore_categories:
category.load_from_game(game)
frame_proc = dmd.Animation().load('dmd/P-ROC.dmd').frames[0]
layer_proc = dmd.FrameLayer(opaque=True, frame=frame_proc)
layer_th = dmd.TextLayer(128/2, 7, game.font_jazz, "center",
opaque=True).set_text("Town Hall")
layer_presents = dmd.TextLayer(128/2, 7, game.font_jazz, "center",
opaque=True).set_text("Presents")
layer_name = dmd.TextLayer(128/2, 7, game.font_jazz, "center",
opaque=True).set_text("TBD")
layer_high_scores = []
for frame in highscore.generate_highscore_frames(highscore_categories):
layer_high_scores.append(dmd.FrameLayer(opaque=True, frame=frame))
self.layer = dmd.ScriptedLayer(128, 32, [
{ "layer": None, "seconds": 10.0 },
{ "layer": layer_proc, "seconds": 3.0 },
{ "layer": layer_th, "seconds": 3.0 },
{ "layer": layer_presents, "seconds": 3.0 },
{ "layer": layer_name, "seconds": 3.0 },
{ "layer": layer_high_scores[0], "seconds": 3.0 },
{ "layer": layer_high_scores[1], "seconds": 3.0 },
{ "layer": layer_high_scores[2], "seconds": 3.0 },
{ "layer": layer_high_scores[3], "seconds": 3.0 },
{ "layer": layer_high_scores[4], "seconds": 3.0 },
])
def mode_stopped(self):
self.layer.script_index = 0
self.frame_start_time = None
self.is_new_script_item = True
def sw_enter_active(self, sw):
self.game.modes.add(self.game.service_mode)
return True
def sw_exit_active(self, sw):
return True
def sw_startButton_active(self, sw):
self.game.modes.remove(self)
self.game.modes.add(self.game.mode.base)
return True
|
mit
| -5,484,256,992,289,313,000
| 38.766667
| 79
| 0.536463
| false
| 3.433094
| false
| false
| false
|
userzimmermann/robotframework-python3
|
src/robot/utils/text.py
|
1
|
3235
|
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .charwidth import get_char_width
from .misc import seq2str2
from .unic import unic
_MAX_ASSIGN_LENGTH = 200
_MAX_ERROR_LINES = 40
_MAX_ERROR_LINE_LENGTH = 78
_ERROR_CUT_EXPLN = ' [ Message content over the limit has been removed. ]'
def cut_long_message(msg):
lines = msg.splitlines()
lengths = _count_line_lengths(lines)
if sum(lengths) <= _MAX_ERROR_LINES:
return msg
start = _prune_excess_lines(lines, lengths)
end = _prune_excess_lines(lines, lengths, from_end=True)
return '\n'.join(start + [_ERROR_CUT_EXPLN] + end)
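# Illustrative behaviour (limits come from the constants above): a message
# longer than _MAX_ERROR_LINES virtual lines keeps roughly the first and last
# _MAX_ERROR_LINES/2 lines and inserts _ERROR_CUT_EXPLN in between.
#
#   >>> msg = '\n'.join(str(i) for i in range(100))
#   >>> _ERROR_CUT_EXPLN in cut_long_message(msg)
#   True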
def _prune_excess_lines(lines, lengths, from_end=False):
if from_end:
lines.reverse()
lengths.reverse()
ret = []
total = 0
# Use // (explicit int div) for Python 3 compatibility:
limit = _MAX_ERROR_LINES//2
for line, length in zip(lines[:limit], lengths[:limit]):
if total + length >= limit:
ret.append(_cut_long_line(line, total, from_end))
break
total += length
ret.append(line)
if from_end:
ret.reverse()
return ret
def _cut_long_line(line, used, from_end):
# Use // (explicit int div) for Python 3 compatibility:
available_lines = _MAX_ERROR_LINES//2 - used
available_chars = available_lines * _MAX_ERROR_LINE_LENGTH - 3
if len(line) > available_chars:
if not from_end:
line = line[:available_chars] + '...'
else:
line = '...' + line[-available_chars:]
return line
def _count_line_lengths(lines):
return [ _count_virtual_line_length(line) for line in lines ]
def _count_virtual_line_length(line):
if not line:
return 1
lines, remainder = divmod(len(line), _MAX_ERROR_LINE_LENGTH)
return lines if not remainder else lines + 1
def format_assign_message(variable, value, cut_long=True):
value = unic(value) if variable.startswith('$') else seq2str2(value)
if cut_long and len(value) > _MAX_ASSIGN_LENGTH:
value = value[:_MAX_ASSIGN_LENGTH] + '...'
return '%s = %s' % (variable, value)
def get_console_length(text):
return sum(get_char_width(char) for char in text)
def pad_console_length(text, width):
if width < 5:
width = 5
diff = get_console_length(text) - width
if diff > 0:
text = _lose_width(text, diff+3) + '...'
return _pad_width(text, width)
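# Illustrative behaviour: text wider than the target width is truncated with
# an ellipsis, shorter text is padded with spaces.
#
#   >>> pad_console_length('abcdefghij', 8)
#   'abcde...'
#   >>> pad_console_length('abc', 5)
#   'abc  '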
def _pad_width(text, width):
more = width - get_console_length(text)
return text + ' ' * more
def _lose_width(text, diff):
lost = 0
while lost < diff:
lost += get_console_length(text[-1])
text = text[:-1]
return text
|
apache-2.0
| 7,472,979,966,690,661,000
| 30.715686
| 77
| 0.643586
| false
| 3.456197
| false
| false
| false
|
onyxfish/votersdaily_web
|
api/couchdb/log_views.py
|
1
|
2166
|
import couchdb
from couchdb.design import ViewDefinition
"""
This module defines a collection of functions which accept a CouchDB database
as an argument, are named with a 'make_views_*' convention, and return a list
of generated CouchDB ViewDefinitions.
The 'syncviews' management command dynamically executes each method to compile
a list of all CouchDB views.
"""
def make_views_all_documents(event_db):
"""
Generate a view that includes all documents.
"""
all_view_map_function = \
'''
function(doc) {
emit(doc.access_datetime, doc)
}
'''
return [ViewDefinition('api', 'all', all_view_map_function)]
def make_views_error_documents(event_db):
"""
    Generate a view that includes only error documents.
"""
error_view_map_function = \
'''
function(doc) {
if (doc.result != "success") {
emit(doc.access_datetime, doc)
}
}
'''
return [ViewDefinition('api', 'errors', error_view_map_function)]
def get_parser_list(event_db):
"""
Return a list of unique parser names in the database.
"""
parser_list_map_function = \
'''
function(doc) {
emit(doc.parser_name, null);
}
'''
parser_list_reduce_function = \
'''
function(keys, values) {
return null;
}
'''
return [
e.key for e in event_db.query(
parser_list_map_function,
parser_list_reduce_function,
group=True)]
def make_views_parser_lists(event_db):
"""
Return a list of views, one for each parser, using templated view
functions.
"""
parser_names = get_parser_list(event_db)
parser_view_map_function = \
'''
function(doc) {
if (doc.parser_name == "%(parser_name)s") {
emit(doc.parser_name, doc)
}
}
'''
return [
ViewDefinition('api', name,
parser_view_map_function % { 'parser_name': name })
for name in parser_names]
|
gpl-3.0
| 4,641,316,289,698,557,000
| 23.908046
| 78
| 0.548476
| false
| 4.094518
| false
| false
| false
|
pr-omethe-us/PyKED
|
pyked/chemked.py
|
1
|
44185
|
"""
Main ChemKED module
"""
# Standard libraries
from os.path import exists
from collections import namedtuple
from warnings import warn
from copy import deepcopy
import xml.etree.ElementTree as etree
import xml.dom.minidom as minidom
from itertools import chain
import numpy as np
# Local imports
from .validation import schema, OurValidator, yaml, Q_
from .converters import datagroup_properties, ReSpecTh_to_ChemKED
VolumeHistory = namedtuple('VolumeHistory', ['time', 'volume'])
VolumeHistory.__doc__ = 'Time history of the volume in an RCM experiment. Deprecated, to be removed after PyKED 0.4' # noqa: E501
VolumeHistory.time.__doc__ = '(`~numpy.ndarray`): the time during the experiment'
VolumeHistory.volume.__doc__ = '(`~numpy.ndarray`): the volume during the experiment'
TimeHistory = namedtuple('TimeHistory', ['time', 'quantity', 'type'])
TimeHistory.__doc__ = 'Time history of the quantity in an RCM experiment'
TimeHistory.time.__doc__ = '(`~numpy.ndarray`): the time during the experiment'
TimeHistory.quantity.__doc__ = '(`~numpy.ndarray`): the quantity of interest during the experiment'
TimeHistory.type.__doc__ = """\
(`str`): the type of time history represented. Possible options are:
* volume
* temperature
* pressure
* piston position
* light emission
* OH emission
* absorption
"""
RCMData = namedtuple(
'RCMData',
['compressed_pressure', 'compressed_temperature', 'compression_time', 'stroke',
'clearance', 'compression_ratio']
)
RCMData.__doc__ = 'Data fields specific to rapid compression machine experiments'
RCMData.compressed_pressure.__doc__ = '(`~pint.Quantity`) The pressure at the end of compression'
RCMData.compressed_temperature.__doc__ = """\
(`~pint.Quantity`) The temperature at the end of compression"""
RCMData.compression_time.__doc__ = '(`~pint.Quantity`) The duration of the compression stroke'
RCMData.stroke.__doc__ = '(`~pint.Quantity`) The length of the stroke'
RCMData.clearance.__doc__ = """\
(`~pint.Quantity`) The clearance between piston face and end wall at the end of compression"""
RCMData.compression_ratio.__doc__ = '(`~pint.Quantity`) The volumetric compression ratio'
Reference = namedtuple('Reference',
['volume', 'journal', 'doi', 'authors', 'detail', 'year', 'pages'])
Reference.__doc__ = 'Information about the article or report where the data can be found'
Reference.volume.__doc__ = '(`str`) The journal volume'
Reference.journal.__doc__ = '(`str`) The name of the journal'
Reference.doi.__doc__ = '(`str`) The Digital Object Identifier of the article'
Reference.authors.__doc__ = '(`list`) The list of authors of the article'
Reference.detail.__doc__ = '(`str`) Detail about where the data can be found in the article'
Reference.year.__doc__ = '(`str`) The year the article was published'
Reference.pages.__doc__ = '(`str`) The pages in the journal where the article was published'
Apparatus = namedtuple('Apparatus', ['kind', 'institution', 'facility'])
Apparatus.__doc__ = 'Information about the experimental apparatus used to generate the data'
Apparatus.kind.__doc__ = '(`str`) The kind of experimental apparatus'
Apparatus.institution.__doc__ = '(`str`) The institution where the experiment is located'
Apparatus.facility.__doc__ = '(`str`) The particular experimental facility at the location'
Composition = namedtuple('Composition', 'species_name InChI SMILES atomic_composition amount')
Composition.__doc__ = 'Detail of the initial composition of the mixture for the experiment'
Composition.species_name.__doc__ = '(`str`) The name of the species'
Composition.InChI.__doc__ = '(`str`) The InChI identifier for the species'
Composition.SMILES.__doc__ = '(`str`) The SMILES identifier for the species'
Composition.atomic_composition.__doc__ = '(`dict`) The atomic composition of the species'
Composition.amount.__doc__ = '(`~pint.Quantity`) The amount of this species'
class ChemKED(object):
"""Main ChemKED class.
The ChemKED class stores information about the contents of a ChemKED database
file. It stores each datapoint associated with the database and provides access
    to the reference information, versions, and file author.
Arguments:
yaml_file (`str`, optional): The filename of the YAML database in ChemKED format.
        dict_input (`dict`, optional): A dictionary with the parsed output of a YAML file in ChemKED
format.
skip_validation (`bool`, optional): Whether validation of the ChemKED should be done. Must
be supplied as a keyword-argument.
Attributes:
datapoints (`list`): List of `DataPoint` objects storing each datapoint in the database.
reference (`~collections.namedtuple`): Attributes include ``volume``, ``journal``, ``doi``,
``authors``, ``detail``, ``year``, and ``pages`` describing the reference from which the
datapoints are derived.
apparatus (`~collections.namedtuple`): Attributes include ``kind`` of experimental
apparatus, and the ``institution`` and ``facility`` where the experimental apparatus is
located.
chemked_version (`str`): Version of the ChemKED database schema used in this file.
        experiment_type (`str`): Type of experimental data contained in this database.
file_author (`dict`): Information about the author of the ChemKED database file.
file_version (`str`): Version of the ChemKED database file.
_properties (`dict`): Original dictionary read from ChemKED database file, meant for
internal use.
"""
def __init__(self, yaml_file=None, dict_input=None, *, skip_validation=False):
if yaml_file is not None:
with open(yaml_file, 'r') as f:
self._properties = yaml.safe_load(f)
elif dict_input is not None:
self._properties = dict_input
else:
raise NameError("ChemKED needs either a YAML filename or dictionary as input.")
if not skip_validation:
self.validate_yaml(self._properties)
self.datapoints = []
for point in self._properties['datapoints']:
self.datapoints.append(DataPoint(point))
self.reference = Reference(
volume=self._properties['reference'].get('volume'),
journal=self._properties['reference'].get('journal'),
doi=self._properties['reference'].get('doi'),
authors=self._properties['reference'].get('authors'),
detail=self._properties['reference'].get('detail'),
year=self._properties['reference'].get('year'),
pages=self._properties['reference'].get('pages'),
)
self.apparatus = Apparatus(
kind=self._properties['apparatus'].get('kind'),
institution=self._properties['apparatus'].get('institution'),
facility=self._properties['apparatus'].get('facility'),
)
for prop in ['chemked-version', 'experiment-type', 'file-authors', 'file-version']:
setattr(self, prop.replace('-', '_'), self._properties[prop])
@classmethod
def from_respecth(cls, filename_xml, file_author='', file_author_orcid=''):
"""Construct a ChemKED instance directly from a ReSpecTh file.
Arguments:
filename_xml (`str`): Filename of the ReSpecTh-formatted XML file to be imported
file_author (`str`, optional): File author to be added to the list generated from the
XML file
file_author_orcid (`str`, optional): ORCID for the file author being added to the list
of file authors
Returns:
`ChemKED`: Instance of the `ChemKED` class containing the data in ``filename_xml``.
Examples:
>>> ck = ChemKED.from_respecth('respecth_file.xml')
>>> ck = ChemKED.from_respecth('respecth_file.xml', file_author='Bryan W. Weber')
>>> ck = ChemKED.from_respecth('respecth_file.xml', file_author='Bryan W. Weber',
file_author_orcid='0000-0000-0000-0000')
"""
properties = ReSpecTh_to_ChemKED(filename_xml, file_author, file_author_orcid,
validate=False)
return cls(dict_input=properties)
def validate_yaml(self, properties):
"""Validate the parsed YAML file for adherance to the ChemKED format.
Arguments:
properties (`dict`): Dictionary created from the parsed YAML file
Raises:
`ValueError`: If the YAML file cannot be validated, a `ValueError` is raised whose
string contains the errors that are present.
"""
validator = OurValidator(schema)
if not validator.validate(properties):
for key, value in validator.errors.items():
if any(['unallowed value' in v for v in value]):
print(('{key} has an illegal value. Allowed values are {values} and are case '
'sensitive.').format(key=key, values=schema[key]['allowed']))
raise ValueError(validator.errors)
def get_dataframe(self, output_columns=None):
"""Get a Pandas DataFrame of the datapoints in this instance.
Arguments:
output_columns (`list`, optional): List of strings specifying the columns to include
in the output DataFrame. The default is `None`, which outputs all of the
columns. Options include (not case sensitive):
* ``Temperature``
* ``Pressure``
* ``Ignition Delay``
* ``Composition``
* ``Equivalence Ratio``
* ``Reference``
* ``Apparatus``
* ``Experiment Type``
* ``File Author``
* ``File Version``
* ``ChemKED Version``
In addition, specific fields from the ``Reference`` and ``Apparatus`` attributes can
be included by specifying the name after a colon. These options are:
* ``Reference:Volume``
* ``Reference:Journal``
* ``Reference:DOI``
* ``Reference:Authors``
* ``Reference:Detail``
* ``Reference:Year``
* ``Reference:Pages``
* ``Apparatus:Kind``
* ``Apparatus:Facility``
* ``Apparatus:Institution``
Only the first author is printed when ``Reference`` or ``Reference:Authors`` is
selected because the whole author list may be quite long.
Note:
If the Composition is selected as an output type, the composition specified in the
`DataPoint` is used. No attempt is made to convert to a consistent basis; mole fractions
will remain mole fractions, mass fractions will remain mass fractions, and mole percent
will remain mole percent. Therefore, it is possible to end up with more than one type of
composition specification in a given column. However, if the composition is included
in the resulting dataframe, the type of each composition will be specified by the "Kind"
field in each row.
Examples:
>>> df = ChemKED(yaml_file).get_dataframe()
>>> df = ChemKED(yaml_file).get_dataframe(['Temperature', 'Ignition Delay'])
Returns:
`~pandas.DataFrame`: Contains the information regarding each point in the ``datapoints``
attribute
"""
import pandas as pd
valid_labels = [a.replace('_', ' ') for a in self.__dict__
if not (a.startswith('__') or a.startswith('_'))
]
valid_labels.remove('datapoints')
valid_labels.extend(
['composition', 'ignition delay', 'temperature', 'pressure', 'equivalence ratio']
)
ref_index = valid_labels.index('reference')
valid_labels[ref_index:ref_index + 1] = ['reference:' + a for a in Reference._fields]
app_index = valid_labels.index('apparatus')
valid_labels[app_index:app_index + 1] = ['apparatus:' + a for a in Apparatus._fields]
species_list = list(set(chain(*[list(d.composition.keys()) for d in self.datapoints])))
if output_columns is None or len(output_columns) == 0:
col_labels = valid_labels
comp_index = col_labels.index('composition')
col_labels[comp_index:comp_index + 1] = species_list + ['Composition:Kind']
else:
output_columns = [a.lower() for a in output_columns]
col_labels = []
for col in output_columns:
if col in valid_labels or col in ['reference', 'apparatus']:
col_labels.append(col)
else:
raise ValueError('{} is not a valid output column choice'.format(col))
if 'composition' in col_labels:
comp_index = col_labels.index('composition')
col_labels[comp_index:comp_index + 1] = species_list + ['Composition:Kind']
if 'reference' in col_labels:
ref_index = col_labels.index('reference')
col_labels[ref_index:ref_index + 1] = ['reference:' + a for a in Reference._fields]
if 'apparatus' in col_labels:
app_index = col_labels.index('apparatus')
col_labels[app_index:app_index + 1] = ['apparatus:' + a for a in Apparatus._fields]
data = []
for d in self.datapoints:
row = []
d_species = list(d.composition.keys())
for col in col_labels:
if col in species_list:
if col in d_species:
row.append(d.composition[col].amount)
else:
row.append(Q_(0.0, 'dimensionless'))
elif 'reference' in col or 'apparatus' in col:
split_col = col.split(':')
if split_col[1] == 'authors':
row.append(getattr(getattr(self, split_col[0]), split_col[1])[0]['name'])
else:
row.append(getattr(getattr(self, split_col[0]), split_col[1]))
elif col in ['temperature', 'pressure', 'ignition delay', 'equivalence ratio']:
row.append(getattr(d, col.replace(' ', '_')))
elif col == 'file authors':
row.append(getattr(self, col.replace(' ', '_'))[0]['name'])
elif col == 'Composition:Kind':
row.append(d.composition_type)
else:
row.append(getattr(self, col.replace(' ', '_')))
data.append(row)
col_labels = [a.title() for a in col_labels]
columns = pd.Index(col_labels)
return pd.DataFrame(data=data, columns=columns)
def write_file(self, filename, *, overwrite=False):
"""Write new ChemKED YAML file based on object.
Arguments:
filename (`str`): Filename for target YAML file
overwrite (`bool`, optional): Whether to overwrite file with given name if present.
Must be supplied as a keyword-argument.
Raises:
            `OSError`: If ``filename`` is already present, and ``overwrite`` is not ``True``.
Example:
>>> dataset = ChemKED(yaml_file)
>>> dataset.write_file(new_yaml_file)
"""
# Ensure file isn't already present
if exists(filename) and not overwrite:
raise OSError(filename + ' already present. Specify "overwrite=True" '
'to overwrite, or rename.'
)
with open(filename, 'w') as yaml_file:
yaml.dump(self._properties, yaml_file)
def convert_to_ReSpecTh(self, filename):
"""Convert ChemKED record to ReSpecTh XML file.
This converter uses common information in a ChemKED file to generate a
ReSpecTh XML file. Note that some information may be lost, as ChemKED stores
some additional attributes.
Arguments:
filename (`str`): Filename for output ReSpecTh XML file.
Example:
>>> dataset = ChemKED(yaml_file)
>>> dataset.convert_to_ReSpecTh(xml_file)
"""
root = etree.Element('experiment')
file_author = etree.SubElement(root, 'fileAuthor')
file_author.text = self.file_authors[0]['name']
# right now ChemKED just uses an integer file version
file_version = etree.SubElement(root, 'fileVersion')
major_version = etree.SubElement(file_version, 'major')
major_version.text = str(self.file_version)
minor_version = etree.SubElement(file_version, 'minor')
minor_version.text = '0'
respecth_version = etree.SubElement(root, 'ReSpecThVersion')
major_version = etree.SubElement(respecth_version, 'major')
major_version.text = '1'
minor_version = etree.SubElement(respecth_version, 'minor')
minor_version.text = '0'
# Only ignition delay currently supported
exp = etree.SubElement(root, 'experimentType')
if self.experiment_type == 'ignition delay':
exp.text = 'Ignition delay measurement'
else:
raise NotImplementedError('Only ignition delay type supported for conversion.')
reference = etree.SubElement(root, 'bibliographyLink')
citation = ''
for author in self.reference.authors:
citation += author['name'] + ', '
citation += (self.reference.journal + ' (' + str(self.reference.year) + ') ' +
str(self.reference.volume) + ':' + self.reference.pages + '. ' +
self.reference.detail
)
reference.set('preferredKey', citation)
reference.set('doi', self.reference.doi)
apparatus = etree.SubElement(root, 'apparatus')
kind = etree.SubElement(apparatus, 'kind')
kind.text = self.apparatus.kind
common_properties = etree.SubElement(root, 'commonProperties')
# ChemKED objects have no common properties once loaded. Check for properties
# among datapoints that tend to be common
common = []
composition = self.datapoints[0].composition
# Composition type *has* to be the same
composition_type = self.datapoints[0].composition_type
if not all(dp.composition_type == composition_type for dp in self.datapoints):
raise NotImplementedError('Error: ReSpecTh does not support varying composition '
'type among datapoints.'
)
if all([composition == dp.composition for dp in self.datapoints]):
# initial composition is common
common.append('composition')
prop = etree.SubElement(common_properties, 'property')
prop.set('name', 'initial composition')
for species_name, species in composition.items():
component = etree.SubElement(prop, 'component')
species_link = etree.SubElement(component, 'speciesLink')
species_link.set('preferredKey', species_name)
if species.InChI is not None:
species_link.set('InChI', species.InChI)
amount = etree.SubElement(component, 'amount')
amount.set('units', composition_type)
amount.text = str(species.amount.magnitude)
# If multiple datapoints present, then find any common properties. If only
# one datapoint, then composition should be the only "common" property.
if len(self.datapoints) > 1:
for prop_name in datagroup_properties:
attribute = prop_name.replace(' ', '_')
quantities = [getattr(dp, attribute, False) for dp in self.datapoints]
# All quantities must have the property in question and all the
# values must be equal
if all(quantities) and quantities.count(quantities[0]) == len(quantities):
common.append(prop_name)
prop = etree.SubElement(common_properties, 'property')
prop.set('description', '')
prop.set('name', prop_name)
prop.set('units', str(quantities[0].units))
value = etree.SubElement(prop, 'value')
value.text = str(quantities[0].magnitude)
# Ignition delay can't be common, unless only a single datapoint.
datagroup = etree.SubElement(root, 'dataGroup')
datagroup.set('id', 'dg1')
datagroup_link = etree.SubElement(datagroup, 'dataGroupLink')
datagroup_link.set('dataGroupID', '')
datagroup_link.set('dataPointID', '')
property_idx = {}
labels = {'temperature': 'T', 'pressure': 'P',
'ignition delay': 'tau', 'pressure rise': 'dP/dt',
}
for prop_name in datagroup_properties:
attribute = prop_name.replace(' ', '_')
# This can't be hasattr because properties are set to the value None
# if no value is specified in the file, so the attribute always exists
prop_indices = [i for i, dp in enumerate(self.datapoints)
if getattr(dp, attribute) is not None
]
if prop_name in common or not prop_indices:
continue
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
prop.set('name', prop_name)
units = str(getattr(self.datapoints[prop_indices[0]], attribute).units)
prop.set('units', units)
idx = 'x{}'.format(len(property_idx) + 1)
property_idx[idx] = {'name': prop_name, 'units': units}
prop.set('id', idx)
prop.set('label', labels[prop_name])
# Need to handle datapoints with possibly different species in the initial composition
if 'composition' not in common:
for dp in self.datapoints:
for species in dp.composition.values():
# Only add new property for species not already considered
has_spec = any([species.species_name in d.values()
for d in property_idx.values()
])
if not has_spec:
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
idx = 'x{}'.format(len(property_idx) + 1)
property_idx[idx] = {'name': species.species_name}
prop.set('id', idx)
prop.set('label', '[' + species.species_name + ']')
prop.set('name', 'composition')
prop.set('units', self.datapoints[0].composition_type)
species_link = etree.SubElement(prop, 'speciesLink')
species_link.set('preferredKey', species.species_name)
if species.InChI is not None:
species_link.set('InChI', species.InChI)
for dp in self.datapoints:
datapoint = etree.SubElement(datagroup, 'dataPoint')
for idx, val in property_idx.items():
# handle regular properties a bit differently than composition
if val['name'] in datagroup_properties:
value = etree.SubElement(datapoint, idx)
quantity = getattr(dp, val['name'].replace(' ', '_')).to(val['units'])
value.text = str(quantity.magnitude)
else:
# composition
for item in dp.composition.values():
if item.species_name == val['name']:
value = etree.SubElement(datapoint, idx)
value.text = str(item.amount.magnitude)
# See https://stackoverflow.com/a/16097112 for the None.__ne__
history_types = ['volume_history', 'temperature_history', 'pressure_history',
'piston_position_history', 'light_emission_history',
'OH_emission_history', 'absorption_history']
time_histories = [getattr(dp, p) for dp in self.datapoints for p in history_types]
time_histories = list(filter(None.__ne__, time_histories))
if len(self.datapoints) > 1 and len(time_histories) > 1:
raise NotImplementedError('Error: ReSpecTh files do not support multiple datapoints '
'with a time history.')
elif len(time_histories) > 0:
for dg_idx, hist in enumerate(time_histories):
if hist.type not in ['volume', 'temperature', 'pressure']:
warn('The time-history type {} is not supported by ReSpecTh for '
'ignition delay experiments'.format(hist.type))
continue
datagroup = etree.SubElement(root, 'dataGroup')
datagroup.set('id', 'dg{}'.format(dg_idx))
datagroup_link = etree.SubElement(datagroup, 'dataGroupLink')
datagroup_link.set('dataGroupID', '')
datagroup_link.set('dataPointID', '')
# Time history has two properties: time and quantity.
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
prop.set('name', 'time')
prop.set('units', str(hist.time.units))
time_idx = 'x{}'.format(len(property_idx) + 1)
property_idx[time_idx] = {'name': 'time'}
prop.set('id', time_idx)
prop.set('label', 't')
prop = etree.SubElement(datagroup, 'property')
prop.set('description', '')
prop.set('name', hist.type)
prop.set('units', str(hist.quantity.units))
quant_idx = 'x{}'.format(len(property_idx) + 1)
property_idx[quant_idx] = {'name': hist.type}
prop.set('id', quant_idx)
prop.set('label', 'V')
for time, quantity in zip(hist.time, hist.quantity):
datapoint = etree.SubElement(datagroup, 'dataPoint')
value = etree.SubElement(datapoint, time_idx)
value.text = str(time.magnitude)
value = etree.SubElement(datapoint, quant_idx)
value.text = str(quantity.magnitude)
ign_types = [getattr(dp, 'ignition_type', False) for dp in self.datapoints]
# All datapoints must have the same ignition target and type
if all(ign_types) and ign_types.count(ign_types[0]) == len(ign_types):
# In ReSpecTh files all datapoints must share ignition type
ignition = etree.SubElement(root, 'ignitionType')
if ign_types[0]['target'] in ['pressure', 'temperature']:
ignition.set('target', ign_types[0]['target'][0].upper())
else:
# options left are species
ignition.set('target', self.datapoints[0].ignition_type['target'])
if ign_types[0]['type'] == 'd/dt max extrapolated':
ignition.set('type', 'baseline max intercept from d/dt')
else:
ignition.set('type', self.datapoints[0].ignition_type['type'])
else:
raise NotImplementedError('Different ignition targets or types for multiple datapoints '
'are not supported in ReSpecTh.')
et = etree.ElementTree(root)
et.write(filename, encoding='utf-8', xml_declaration=True)
# now do a "pretty" rewrite
xml = minidom.parse(filename)
xml_string = xml.toprettyxml(indent=' ')
with open(filename, 'w') as f:
f.write(xml_string)
print('Converted to ' + filename)
class DataPoint(object):
"""Class for a single datapoint.
The `DataPoint` class stores the information associated with a single data point in the dataset
parsed from the `ChemKED` YAML input.
Arguments:
properties (`dict`): Dictionary adhering to the ChemKED format for ``datapoints``
Attributes:
composition (`list`): List of dictionaries representing the species and their quantities
ignition_delay (pint.Quantity): The ignition delay of the experiment
temperature (pint.Quantity): The temperature of the experiment
pressure (pint.Quantity): The pressure of the experiment
pressure_rise (pint.Quantity, optional): The amount of pressure rise during the induction
period of a shock tube experiment.
compression_time (pint.Quantity, optional): The compression time for an RCM experiment.
compressed_pressure (pint.Quantity, optional): The pressure at the end of compression for
an RCM experiment.
compressed_temperature (pint.Quantity, optional): The temperature at the end of compression
for an RCM experiment.
first_stage_ignition_delay (pint.Quantity, optional): The first stage ignition delay of the
experiment.
ignition_type (`dict`): Dictionary with the ignition target and type.
volume_history (`~collections.namedtuple`, optional): The volume history of the reactor
during an RCM experiment.
pressure_history (`~collections.namedtuple`, optional): The pressure history of the reactor
during an experiment.
temperature_history (`~collections.namedtuple`, optional): The temperature history of the
reactor during an experiment.
piston_position_history (`~collections.namedtuple`, optional): The piston position history
of the reactor during an RCM experiment.
light_emission_history (`~collections.namedtuple`, optional): The light emission history
of the reactor during an experiment.
OH_emission_history (`~collections.namedtuple`, optional): The OH emission history of the
reactor during an experiment.
absorption_history (`~collections.namedtuple`, optional): The absorption history of the
reactor during an experiment.
"""
value_unit_props = [
'ignition-delay', 'first-stage-ignition-delay', 'temperature', 'pressure',
'pressure-rise',
]
rcm_data_props = [
'compressed-pressure', 'compressed-temperature', 'compression-time', 'stroke', 'clearance',
'compression-ratio'
]
def __init__(self, properties):
for prop in self.value_unit_props:
if prop in properties:
quant = self.process_quantity(properties[prop])
setattr(self, prop.replace('-', '_'), quant)
else:
setattr(self, prop.replace('-', '_'), None)
if 'rcm-data' in properties:
orig_rcm_data = properties['rcm-data']
rcm_props = {}
for prop in self.rcm_data_props:
if prop in orig_rcm_data:
quant = self.process_quantity(orig_rcm_data[prop])
rcm_props[prop.replace('-', '_')] = quant
else:
rcm_props[prop.replace('-', '_')] = None
self.rcm_data = RCMData(**rcm_props)
else:
self.rcm_data = None
self.composition_type = properties['composition']['kind']
composition = {}
for species in properties['composition']['species']:
species_name = species['species-name']
amount = self.process_quantity(species['amount'])
InChI = species.get('InChI')
SMILES = species.get('SMILES')
atomic_composition = species.get('atomic-composition')
composition[species_name] = Composition(
species_name=species_name, InChI=InChI, SMILES=SMILES,
atomic_composition=atomic_composition, amount=amount)
setattr(self, 'composition', composition)
self.equivalence_ratio = properties.get('equivalence-ratio')
self.ignition_type = deepcopy(properties.get('ignition-type'))
if 'time-histories' in properties and 'volume-history' in properties:
raise TypeError('time-histories and volume-history are mutually exclusive')
if 'time-histories' in properties:
for hist in properties['time-histories']:
if hasattr(self, '{}_history'.format(hist['type'].replace(' ', '_'))):
raise ValueError('Each history type may only be specified once. {} was '
'specified multiple times'.format(hist['type']))
time_col = hist['time']['column']
time_units = hist['time']['units']
quant_col = hist['quantity']['column']
quant_units = hist['quantity']['units']
if isinstance(hist['values'], list):
values = np.array(hist['values'])
else:
# Load the values from a file
values = np.genfromtxt(hist['values']['filename'], delimiter=',')
time_history = TimeHistory(
time=Q_(values[:, time_col], time_units),
quantity=Q_(values[:, quant_col], quant_units),
type=hist['type'],
)
setattr(self, '{}_history'.format(hist['type'].replace(' ', '_')), time_history)
if 'volume-history' in properties:
warn('The volume-history field should be replaced by time-histories. '
'volume-history will be removed after PyKED 0.4',
DeprecationWarning)
time_col = properties['volume-history']['time']['column']
time_units = properties['volume-history']['time']['units']
volume_col = properties['volume-history']['volume']['column']
volume_units = properties['volume-history']['volume']['units']
values = np.array(properties['volume-history']['values'])
self.volume_history = VolumeHistory(
time=Q_(values[:, time_col], time_units),
volume=Q_(values[:, volume_col], volume_units),
)
history_types = ['volume', 'temperature', 'pressure', 'piston_position', 'light_emission',
'OH_emission', 'absorption']
for h in history_types:
if not hasattr(self, '{}_history'.format(h)):
setattr(self, '{}_history'.format(h), None)
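    # A sketch of the 'time-histories' structure the constructor above
    # expects, inferred from the parsing logic (the numbers are illustrative,
    # not from any real dataset):
    #
    #   time-histories:
    #     - type: volume
    #       time: {units: s, column: 0}
    #       quantity: {units: cm3, column: 1}
    #       values: [[0.0, 1.0], [1.0, 0.5]]
    #
    # 'values' may instead be {filename: history.csv} to load a CSV file.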
def process_quantity(self, properties):
"""Process the uncertainty information from a given quantity and return it
"""
quant = Q_(properties[0])
if len(properties) > 1:
unc = properties[1]
uncertainty = unc.get('uncertainty', False)
upper_uncertainty = unc.get('upper-uncertainty', False)
lower_uncertainty = unc.get('lower-uncertainty', False)
uncertainty_type = unc.get('uncertainty-type')
if uncertainty_type == 'relative':
if uncertainty:
quant = quant.plus_minus(float(uncertainty), relative=True)
elif upper_uncertainty and lower_uncertainty:
warn('Asymmetric uncertainties are not supported. The '
'maximum of lower-uncertainty and upper-uncertainty '
'has been used as the symmetric uncertainty.')
uncertainty = max(float(upper_uncertainty), float(lower_uncertainty))
quant = quant.plus_minus(uncertainty, relative=True)
else:
raise ValueError('Either "uncertainty" or "upper-uncertainty" and '
'"lower-uncertainty" need to be specified.')
elif uncertainty_type == 'absolute':
if uncertainty:
uncertainty = Q_(uncertainty)
quant = quant.plus_minus(uncertainty.to(quant.units).magnitude)
elif upper_uncertainty and lower_uncertainty:
warn('Asymmetric uncertainties are not supported. The '
'maximum of lower-uncertainty and upper-uncertainty '
'has been used as the symmetric uncertainty.')
uncertainty = max(Q_(upper_uncertainty), Q_(lower_uncertainty))
quant = quant.plus_minus(uncertainty.to(quant.units).magnitude)
else:
raise ValueError('Either "uncertainty" or "upper-uncertainty" and '
'"lower-uncertainty" need to be specified.')
else:
raise ValueError('uncertainty-type must be one of "absolute" or "relative"')
return quant
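    # A minimal sketch of the inputs process_quantity accepts, inferred from
    # the branches above (the values are illustrative):
    #
    #   self.process_quantity(['1.0 s'])
    #   self.process_quantity(['1.0 s', {'uncertainty': '0.1 s',
    #                                    'uncertainty-type': 'absolute'}])
    #   self.process_quantity(['1.0 s', {'uncertainty': 0.1,
    #                                    'uncertainty-type': 'relative'}])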
def get_cantera_composition_string(self, species_conversion=None):
"""Get the composition in a string format suitable for input to Cantera.
Returns a formatted string no matter the type of composition. As such, this method
is not recommended for end users; instead, prefer the `get_cantera_mole_fraction`
or `get_cantera_mass_fraction` methods.
Arguments:
species_conversion (`dict`, optional): Mapping of species identifier to a
species name. This argument should be supplied when the name of the
species in the ChemKED YAML file does not match the name of the same
species in a chemical kinetic mechanism. The species identifier (the key
of the mapping) can be the name, InChI, or SMILES provided in the ChemKED
file, while the value associated with a key should be the desired name in
the Cantera format output string.
Returns:
`str`: String in the ``SPEC:AMT, SPEC:AMT`` format
Raises:
`ValueError`: If the composition type of the `DataPoint` is not one of
``'mass fraction'``, ``'mole fraction'``, or ``'mole percent'``
"""
if self.composition_type in ['mole fraction', 'mass fraction']:
factor = 1.0
elif self.composition_type == 'mole percent':
factor = 100.0
else:
raise ValueError('Unknown composition type: {}'.format(self.composition_type))
if species_conversion is None:
comps = ['{!s}:{:.4e}'.format(c.species_name,
c.amount.magnitude/factor) for c in self.composition.values()]
else:
comps = []
for c in self.composition.values():
amount = c.amount.magnitude/factor
idents = [getattr(c, s, False) for s in ['species_name', 'InChI', 'SMILES']]
present = [i in species_conversion for i in idents]
if not any(present):
comps.append('{!s}:{:.4e}'.format(c.species_name, amount))
else:
if len([i for i in present if i]) > 1:
raise ValueError('More than one conversion present for species {}'.format(
c.species_name))
ident = idents[present.index(True)]
species_replacement_name = species_conversion.pop(ident)
comps.append('{!s}:{:.4e}'.format(species_replacement_name, amount))
if len(species_conversion) > 0:
raise ValueError('Unknown species in conversion: {}'.format(species_conversion))
return ', '.join(comps)
def get_cantera_mole_fraction(self, species_conversion=None):
"""Get the mole fractions in a string format suitable for input to Cantera.
Arguments:
species_conversion (`dict`, optional): Mapping of species identifier to a
species name. This argument should be supplied when the name of the
species in the ChemKED YAML file does not match the name of the same
species in a chemical kinetic mechanism. The species identifier (the key
of the mapping) can be the name, InChI, or SMILES provided in the ChemKED
file, while the value associated with a key should be the desired name in
the Cantera format output string.
Returns:
`str`: String of mole fractions in the ``SPEC:AMT, SPEC:AMT`` format
Raises:
`ValueError`: If the composition type is ``'mass fraction'``, the conversion cannot
be done because no molecular weight information is known
Examples:
>>> dp = DataPoint(properties)
>>> dp.get_cantera_mole_fraction()
'H2:4.4400e-03, O2:5.5600e-03, Ar:9.9000e-01'
>>> species_conversion = {'H2': 'h2', 'O2': 'o2'}
>>> dp.get_cantera_mole_fraction(species_conversion)
'h2:4.4400e-03, o2:5.5600e-03, Ar:9.9000e-01'
>>> species_conversion = {'1S/H2/h1H': 'h2', '1S/O2/c1-2': 'o2'}
>>> dp.get_cantera_mole_fraction(species_conversion)
'h2:4.4400e-03, o2:5.5600e-03, Ar:9.9000e-01'
"""
if self.composition_type == 'mass fraction':
raise ValueError('Cannot get mole fractions from the given composition.\n'
'{}'.format(self.composition))
else:
return self.get_cantera_composition_string(species_conversion)
def get_cantera_mass_fraction(self, species_conversion=None):
"""Get the mass fractions in a string format suitable for input to Cantera.
Arguments:
species_conversion (`dict`, optional): Mapping of species identifier to a
species name. This argument should be supplied when the name of the
species in the ChemKED YAML file does not match the name of the same
species in a chemical kinetic mechanism. The species identifier (the key
of the mapping) can be the name, InChI, or SMILES provided in the ChemKED
file, while the value associated with a key should be the desired name in
the Cantera format output string.
Returns:
`str`: String of mass fractions in the ``SPEC:AMT, SPEC:AMT`` format
Raises:
`ValueError`: If the composition type is ``'mole fraction'`` or
``'mole percent'``, the conversion cannot be done because no molecular
weight information is known
Examples:
>>> dp = DataPoint(properties)
>>> dp.get_cantera_mass_fraction()
'H2:2.2525e-04, O2:4.4775e-03, Ar:9.9530e-01'
>>> species_conversion = {'H2': 'h2', 'O2': 'o2'}
>>> dp.get_cantera_mass_fraction(species_conversion)
'h2:2.2525e-04, o2:4.4775e-03, Ar:9.9530e-01'
>>> species_conversion = {'1S/H2/h1H': 'h2', '1S/O2/c1-2': 'o2'}
>>> dp.get_cantera_mass_fraction(species_conversion)
'h2:2.2525e-04, o2:4.4775e-03, Ar:9.9530e-01'
"""
        if self.composition_type in ['mole fraction', 'mole percent']:
            raise ValueError('Cannot get mass fractions from the given composition.\n'
                             '{}'.format(self.composition))
else:
return self.get_cantera_composition_string(species_conversion)
|
bsd-3-clause
| -6,256,631,093,448,223,000
| 48.701912
| 130
| 0.583388
| false
| 4.368265
| false
| false
| false
|
Anvil/maestro-ng
|
maestro/loader.py
|
1
|
2758
|
# Copyright (C) 2015 SignalFx, Inc. All rights reserved.
#
# Docker container orchestration utility.
import jinja2
import os
import sys
import yaml
from . import exceptions
class MaestroYamlConstructor(yaml.constructor.Constructor):
"""A PyYAML object constructor that errors on duplicate keys in YAML
mappings. Because for some reason PyYAML doesn't do that since 3.x."""
def construct_mapping(self, node, deep=False):
if not isinstance(node, yaml.nodes.MappingNode):
raise yaml.constructor.ConstructorError(
None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
keys = set()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
if key in keys:
raise yaml.constructor.ConstructorError(
"while constructing a mapping", node.start_mark,
"found duplicate key (%s)" % key, key_node.start_mark)
keys.add(key)
return yaml.constructor.Constructor.construct_mapping(self, node, deep)
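# For example, a mapping with a repeated key such as:
#
#   services:
#     web: {}
#     web: {}
#
# now raises a ConstructorError instead of silently keeping the last value,
# which is PyYAML's default behaviour.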
class MaestroYamlLoader(yaml.reader.Reader, yaml.scanner.Scanner,
yaml.parser.Parser, yaml.composer.Composer,
MaestroYamlConstructor, yaml.resolver.Resolver):
"""A custom YAML Loader that uses the custom MaestroYamlConstructor."""
def __init__(self, stream):
yaml.reader.Reader.__init__(self, stream)
yaml.scanner.Scanner.__init__(self)
yaml.parser.Parser.__init__(self)
yaml.composer.Composer.__init__(self)
MaestroYamlConstructor.__init__(self)
yaml.resolver.Resolver.__init__(self)
def load(filename):
"""Load a config from the given file.
Args:
filename (string): Path to the YAML environment description
configuration file to load. Use '-' for stdin.
Returns:
A python data structure corresponding to the YAML configuration.
"""
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(filename)),
extensions=['jinja2.ext.with_'])
try:
if filename == '-':
template = env.from_string(sys.stdin.read())
else:
template = env.get_template(os.path.basename(filename))
except jinja2.exceptions.TemplateNotFound:
raise exceptions.MaestroException(
'Environment description file {} not found!'.format(filename))
except Exception as e:
raise exceptions.MaestroException(
'Error reading environment description file {}: {}!'
.format(filename, e))
return yaml.load(template.render(env=os.environ), Loader=MaestroYamlLoader)
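# A minimal usage sketch (the file name is a placeholder):
#
#   config = load('environments/demo.yml')
#
# The file is rendered through Jinja2 first, so it may reference environment
# variables, e.g. {{ env.HOME }}, before being parsed as YAML.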
|
apache-2.0
| 4,613,575,730,410,633,000
| 36.27027
| 79
| 0.638869
| false
| 4.275969
| false
| false
| false
|
bootstraponline/testdroid_device_finder
|
device_finder.py
|
1
|
4900
|
# -*- coding: utf-8 -*-
# from: https://github.com/bitbar/testdroid-samples/blob/03fc043ba98235b9ea46a0ab8646f3b20dd1960e/appium/sample-scripts/python/device_finder.py
import os, sys, requests, json, time, httplib
from optparse import OptionParser
from urlparse import urljoin
from datetime import datetime
class DeviceFinder:
# Cloud URL (not including API path)
url = None
# Oauth access token
access_token = None
# Oauth refresh token
refresh_token = None
# Unix timestamp (seconds) when token expires
token_expiration_time = None
""" Full constructor with username and password
"""
def __init__(self, username=None, password=None, url="https://cloud.testdroid.com", download_buffer_size=65536):
self.username = username
self.password = password
self.cloud_url = url
self.download_buffer_size = download_buffer_size
""" Get Oauth2 token
"""
def get_token(self):
if not self.access_token:
# TODO: refresh
url = "%s/oauth/token" % self.cloud_url
payload = {
"client_id": "testdroid-cloud-api",
"grant_type": "password",
"username": self.username,
"password": self.password
}
res = requests.post(
url,
data = payload,
headers = { "Accept": "application/json" }
)
if res.status_code != 200:
print "FAILED: Authentication or connection failure. Check Testdroid Cloud URL and your credentials."
sys.exit(-1)
reply = res.json()
self.access_token = reply['access_token']
self.refresh_token = reply['refresh_token']
self.token_expiration_time = time.time() + reply['expires_in']
elif self.token_expiration_time < time.time():
url = "%s/oauth/token" % self.cloud_url
payload = {
"client_id": "testdroid-cloud-api",
"grant_type": "refresh_token",
"refresh_token": self.refresh_token
}
res = requests.post(
url,
data = payload,
headers = { "Accept": "application/json" }
)
if res.status_code != 200:
print "FAILED: Unable to get a new access token using refresh token"
self.access_token = None
return self.get_token()
reply = res.json()
self.access_token = reply['access_token']
self.refresh_token = reply['refresh_token']
self.token_expiration_time = time.time() + reply['expires_in']
return self.access_token
""" Helper method for getting necessary headers to use for API calls, including authentication
"""
def _build_headers(self):
return { "Authorization": "Bearer %s" % self.get_token(), "Accept": "application/json" }
""" GET from API resource
"""
def get(self, path=None, payload={}, headers={}):
if path.find('v2/') >= 0:
cut_path = path.split('v2/')
path = cut_path[1]
url = "%s/api/v2/%s" % (self.cloud_url, path)
headers = dict(self._build_headers().items() + headers.items())
res = requests.get(url, params=payload, headers=headers)
if headers['Accept'] == 'application/json':
return res.json()
else:
return res.text
""" Returns list of devices
"""
def get_devices(self, limit=0):
return self.get("devices?limit=%s" % (limit))
""" Find available free Android device
"""
def available_free_android_device(self, limit=0):
print "Searching Available Free Android Device..."
for device in self.get_devices(limit)['data']:
if device['creditsPrice'] == 0 and device['locked'] == False and device['osType'] == "ANDROID" and device['softwareVersion']['apiLevel'] > 16:
print "Found device '%s'" % device['displayName']
print ""
return device['displayName']
print "No available device found"
print ""
return ""
""" Find available free iOS device
"""
def available_free_ios_device(self, limit=0):
print "Searching Available Free iOS Device..."
for device in self.get_devices(limit)['data']:
if device['creditsPrice'] == 0 and device['locked'] == False and device['osType'] == "IOS":
print "Found device '%s'" % device['displayName']
print ""
return device['displayName']
print "No available device found"
print ""
return ""
|
apache-2.0
| -8,658,598,604,070,463,000
| 35.842105
| 154
| 0.546327
| false
| 4.340124
| false
| false
| false
|
bolkedebruin/airflow
|
tests/providers/datadog/hooks/test_datadog.py
|
1
|
4769
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
from unittest import mock
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.datadog.hooks.datadog import DatadogHook
APP_KEY = 'app_key'
API_KEY = 'api_key'
METRIC_NAME = 'metric'
DATAPOINT = 7
TAGS = ['tag']
TYPE = 'rate'
INTERVAL = 30
TITLE = 'title'
TEXT = 'text'
AGGREGATION_KEY = 'aggregation-key'
ALERT_TYPE = 'warning'
DATE_HAPPENED = 12345
HANDLE = 'handle'
PRIORITY = 'normal'
RELATED_EVENT_ID = 7
DEVICE_NAME = 'device-name'
class TestDatadogHook(unittest.TestCase):
@mock.patch('airflow.providers.datadog.hooks.datadog.initialize')
@mock.patch('airflow.providers.datadog.hooks.datadog.DatadogHook.get_connection')
def setUp(self, mock_get_connection, mock_initialize):
mock_get_connection.return_value = Connection(extra=json.dumps({
'app_key': APP_KEY,
'api_key': API_KEY,
}))
self.hook = DatadogHook()
@mock.patch('airflow.providers.datadog.hooks.datadog.initialize')
@mock.patch('airflow.providers.datadog.hooks.datadog.DatadogHook.get_connection')
def test_api_key_required(self, mock_get_connection, mock_initialize):
mock_get_connection.return_value = Connection()
with self.assertRaises(AirflowException) as ctx:
DatadogHook()
self.assertEqual(str(ctx.exception),
'api_key must be specified in the Datadog connection details')
def test_validate_response_valid(self):
try:
self.hook.validate_response({'status': 'ok'})
except AirflowException:
self.fail('Unexpected AirflowException raised')
def test_validate_response_invalid(self):
with self.assertRaises(AirflowException):
self.hook.validate_response({'status': 'error'})
@mock.patch('airflow.providers.datadog.hooks.datadog.api.Metric.send')
def test_send_metric(self, mock_send):
mock_send.return_value = {'status': 'ok'}
self.hook.send_metric(
METRIC_NAME,
DATAPOINT,
tags=TAGS,
type_=TYPE,
interval=INTERVAL,
)
mock_send.assert_called_once_with(
metric=METRIC_NAME,
points=DATAPOINT,
host=self.hook.host,
tags=TAGS,
type=TYPE,
interval=INTERVAL,
)
@mock.patch('airflow.providers.datadog.hooks.datadog.api.Metric.query')
@mock.patch('airflow.providers.datadog.hooks.datadog.time.time')
def test_query_metric(self, mock_time, mock_query):
now = 12345
mock_time.return_value = now
mock_query.return_value = {'status': 'ok'}
self.hook.query_metric('query', 60, 30)
mock_query.assert_called_once_with(
start=now - 60,
end=now - 30,
query='query',
)
@mock.patch('airflow.providers.datadog.hooks.datadog.api.Event.create')
def test_post_event(self, mock_create):
mock_create.return_value = {'status': 'ok'}
self.hook.post_event(
TITLE,
TEXT,
aggregation_key=AGGREGATION_KEY,
alert_type=ALERT_TYPE,
date_happened=DATE_HAPPENED,
handle=HANDLE,
priority=PRIORITY,
related_event_id=RELATED_EVENT_ID,
tags=TAGS,
device_name=DEVICE_NAME,
)
mock_create.assert_called_once_with(
title=TITLE,
text=TEXT,
aggregation_key=AGGREGATION_KEY,
alert_type=ALERT_TYPE,
date_happened=DATE_HAPPENED,
handle=HANDLE,
priority=PRIORITY,
related_event_id=RELATED_EVENT_ID,
tags=TAGS,
host=self.hook.host,
device_name=DEVICE_NAME,
source_type_name=self.hook.source_type_name,
)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -8,385,485,857,153,462,000
| 33.064286
| 87
| 0.63787
| false
| 3.766983
| true
| false
| false
|
redhat-cip/python-tripleo-wrapper
|
rdomhelper/ssh.py
|
1
|
11391
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import paramiko
from paramiko import ssh_exception
import io
import logging
import select
import time
LOG = logging.getLogger('__chainsaw__')
class SshClient(object):
"""SSH client based on Paramiko.
    This class implements the following features:
    - run commands on a remote host
    - send files to a remote host
    - redirect the connection through another ssh server so that every
      command is executed on the redirected host
    - create remote files
"""
def __init__(self, hostname, user, key_filename=None,
via_ip=None):
""":param hostname: the host on which to connect
:type hostname: str
:param user: the user to use for the connection
:type user: str
:param key_filename: the private key path to use, by default it will
use the system host keys
:type key_filename: str
        :param via_ip: the IP of an intermediate host to tunnel through; the
        connection to the target host is opened via port 22 on that host
        :type via_ip: str
"""
assert hostname, 'hostname is defined.'
assert user, 'user is defined.'
self._hostname = hostname
self._user = user
self._key_filename = key_filename
self.load_private_key(key_filename)
self._client = paramiko.SSHClient()
self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self._via_ip = via_ip
self._transport = None
self._started = False
self.description = 'not started yet'
self._environment_filenames = []
def load_private_key(self, priv_key):
"""Register the SSH private key."""
with open(priv_key) as fd:
self._private_key = paramiko.RSAKey.from_private_key(fd)
def _get_transport_via_ip(self):
exception = None
for i in range(60):
try:
channel = self._client.get_transport().open_channel(
'direct-tcpip',
(self._hostname, 22),
(self._via_ip, 0))
            except ssh_exception.ChannelException as e:
                exception = e
                LOG.debug('%s creating the direct-tcpip connection' % self.description)
                time.sleep(1)
else:
transport = paramiko.Transport(channel)
transport.start_client()
transport.auth_publickey(self._user, self._private_key)
return transport
raise exception
def _get_transport(self):
if self._via_ip:
transport = self._get_transport_via_ip()
else:
transport = self._client.get_transport()
transport.set_keepalive(10)
return transport
def start(self):
"""Start the ssh client and connect to the host.
        It waits for the ssh service to become available, retrying once per
        second for up to 60 attempts. If it still cannot connect, the
        function raises an SSHException.
"""
if self._via_ip:
connect_to = self._via_ip
self.description = '[%s@%s via %s]' % (self._user,
self._hostname,
self._via_ip)
else:
connect_to = self._hostname
self.description = '[%s@%s]' % (self._user,
self._hostname)
for i in range(60):
try:
self._client.connect(
connect_to,
username=self._user,
allow_agent=True,
key_filename=self._key_filename)
# NOTE(Gonéri): TypeError is in the list because of
# https://github.com/paramiko/paramiko/issues/615
self._transport = self._get_transport()
except (OSError,
TypeError,
ssh_exception.SSHException,
ssh_exception.NoValidConnectionsError) as e:
LOG.info('%s waiting for %s' % (self.description, connect_to))
LOG.debug("exception: '%s'" % str(e))
time.sleep(1)
else:
LOG.debug('%s connected' % self.description)
self._started = True
return
_error = ("unable to connect to ssh service on '%s'" % self._hostname)
LOG.error(_error)
raise ssh_exception.SSHException(_error)
def _check_started(self):
if not self._started:
_error = "ssh client not started, please start the client"
LOG.error(_error)
raise ssh_exception.SSHException(_error)
def stop(self):
"""Close the ssh connection."""
self._started = False
self._client.close()
def run(self, cmd, sudo=False, ignore_error=False, success_status=(0,),
error_callback=None, custom_log=None):
"""Run a command on the remote host.
The command is run on the remote host, if there is a redirected host
then the command will be run on that redirected host. See __init__.
:param cmd: the command to run
:type cmd: str
        :param sudo: True if the command should be run with sudo; this parameter
        disables the use of environment files.
        :type sudo: bool
:param success_status: the list of the possible success status
:type success_status: list
:param error_callback: if provided, the callback to call in case of
a failure. it will be called with two args, the output of the command
and the returned error code.
:return: the tuple (output of the command, returned code)
:rtype: tuple
        :param custom_log: an optional string to record in the log instead of the
        command. This is useful, for example, if you want to hide a password.
:type custom_log: str
"""
self._check_started()
cmd_output = io.StringIO()
channel = self._get_channel()
if sudo:
cmd = "sudo %s" % cmd
else:
for filename in self._environment_filenames:
cmd = '. %s; %s' % (filename, cmd)
if not custom_log:
custom_log = cmd
LOG.info("%s run '%s'" % (self.description, custom_log))
channel.exec_command(cmd)
while True:
if channel.exit_status_ready():
break
rl, _, _ = select.select([channel], [], [], 30)
if rl:
received = channel.recv(1024).decode('UTF-8', 'ignore').strip()
if received:
LOG.debug(received)
cmd_output.write(received)
cmd_output = cmd_output.getvalue()
exit_status = channel.exit_status
if ignore_error or channel.exit_status in success_status:
return cmd_output, channel.exit_status
elif error_callback:
return error_callback(cmd_output, exit_status)
else:
_error = ("%s command %s has failed with, rc='%s'" %
(self.description, custom_log, exit_status))
LOG.error(_error)
raise ssh_exception.SSHException(_error)
def _get_channel(self):
"""Returns a channel according to if there is a redirection to do or
not.
"""
channel = self._transport.open_session()
channel.set_combine_stderr(True)
channel.get_pty()
return channel
def send_file(self, local_path, remote_path):
"""Send a file to the remote host.
:param local_path: the local path of the file
:type local_path: str
:param remote_path: the remote path of the file
:type remote_path: str
:return: the file attributes
:rtype: paramiko.sftp_attr.SFTPAttributes
"""
self._check_started()
sftp = paramiko.SFTPClient.from_transport(self._transport)
return sftp.put(local_path, remote_path)
def create_file(self, path, content, mode='w'):
"""Create a file with a content.
:param path: the path of the file.
:type path: str
:param content: the content of the file
:type content: str
:param mode: the mode of the file while opening it
:type mode: str
"""
self._check_started()
sftp = paramiko.SFTPClient.from_transport(self._transport)
with sftp.open(path, mode) as remote_file:
remote_file.write(content)
remote_file.flush()
def info(self):
return {'hostname': self._hostname,
'user': self._user,
'key_filename': self._key_filename}
def add_environment_file(self, filename):
self._environment_filenames.append(filename)
class PoolSshClient(object):
def __init__(self):
self._ssh_clients = {}
def build_ssh_client(self, hostname, user, key_filename=None,
via_ip=None):
_ssh_client = SshClient(hostname, user, key_filename,
via_ip)
_ssh_client.start()
self._ssh_clients[user] = _ssh_client
def add_ssh_client(self, user, ssh_client):
self._ssh_clients[user] = ssh_client
def del_ssh_client(self, user):
self._check_ssh_client(user)
del self._ssh_clients[user]
def get_client(self, user):
self._check_ssh_client(user)
return self._ssh_clients[user]
def _check_ssh_client(self, user):
if user not in self._ssh_clients.keys():
_error = "ssh client for user %s not existing" % user
LOG.error(_error)
raise ssh_exception.SSHException(_error)
def run(self, user, cmd, sudo=False, ignore_error=False,
success_status=(0,), error_callback=None, custom_log=None):
self._check_ssh_client(user)
return self._ssh_clients[user].run(
cmd,
sudo=sudo,
ignore_error=ignore_error,
success_status=success_status,
error_callback=error_callback,
custom_log=custom_log)
def send_file(self, user, local_path, remote_path):
self._check_ssh_client(user)
return self._ssh_clients[user].send_file(local_path, remote_path)
def create_file(self, user, path, content, mode='w'):
self._check_ssh_client(user)
return self._ssh_clients[user].create_file(path, content, mode)
def stop_all(self):
for ssh_client in self._ssh_clients.values():
ssh_client.stop()
def add_environment_file(self, user, filename):
self._check_ssh_client(user)
self._ssh_clients[user].add_environment_file(filename)
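# A minimal usage sketch (host, user and key path are placeholders):
#
#   client = SshClient('192.0.2.10', 'stack',
#                      key_filename='/home/stack/.ssh/id_rsa')
#   client.start()
#   output, rc = client.run('uname -a')
#   client.stop()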
|
apache-2.0
| -2,446,650,625,284,174,300
| 35.623794
| 89
| 0.576207
| false
| 4.251586
| false
| false
| false
|
hermestrimegiste/patchtgtel
|
patchConnectionTogotelecom.py
|
1
|
2031
|
#-*- coding:utf-8 -*-
__author__ = 'hermes'
import socket
from os import system
from time import sleep
from datetime import datetime
global connectionName
connectionName = 'TOGOTELECOM'  # Set the name of your network connection
def is_connected():
# http://stackoverflow.com/questions/20913411/test-if-an-internet-connection-is-present-in-python
try:
#host = socket.gethostbyname("www.google.com")
#socket.create_connection(('173.194.67.94', 80), 25)
        # method 2, without a connection test
socket.gethostbyname("www.google.com")
return True
except:
try:
socket.create_connection(('173.194.67.94', 80), 15)
return True
except:
pass
return False
def hardRestartNetwork():
system('nmcli nm enable false')
system('nmcli nm enable true')
sleep(5)
system("nmcli con up id '%s'"% connectionName)
def patchTogotelecom():
activeReseau = system('nmcli nm enable true')
deconnectionSoft = system('nmcli dev disconnect iface ttyUSB0')
sleep(5)
if (deconnectionSoft == 0 or deconnectionSoft == 1536):
activeTGTEL = system("nmcli con up id '%s'"% connectionName)
if activeTGTEL == 768:
            # if error: the 90 s connection timeout expired.
#system('modprobe --force-vermagic usb_wwan usbserial')
hardRestartNetwork()
else:
        # restart the network if the soft method does not work
hardRestartNetwork()
if is_connected():
        print(u'Connected on %s ' % str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))
else:
        print(u'Attempt failed on %s ' % str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))
# sleep(5)
# start of script execution
#system('modprobe --force-vermagic usb_wwan usbserial')
hardRestartNetwork()
print(u'script started > %s ' % str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))
sleep(5)
while True:
if is_connected():
sleep(60)
else:
        print(u'Reconnection attempt on %s ' % str(datetime.now().strftime('%d-%m-%Y -> %H:%M:%S')))
patchTogotelecom()
|
gpl-2.0
| -6,435,792,570,533,313,000
| 26.378378
| 100
| 0.659427
| false
| 3.165625
| false
| false
| false
|
nigelb/SerialGrabber
|
serial_grabber/cli.py
|
1
|
2827
|
#!/usr/bin/env python
# SerialGrabber reads data from a serial port and processes it with the
# configured processor.
# Copyright (C) 2012 NigelB
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import signal
import time
from SerialGrabber_Storage import storage_cache
from serial_grabber.commander import MultiProcessParameterFactory
from serial_grabber.util import config_helper
from serial_grabber.watchdog import running, counter, Watchdog
from serial_grabber.processor import ProcessorManager
class status:
def __init__(self, logger):
self.logger = logger
def set_tooltip(self, tooltip):
self.logger.info(tooltip)
def register_handler(running, watchdog, reader, processor, command):
def signal_handler(signal, frame):
print 'You pressed Ctrl+C!'
running.running = False
if command:
command.stop()
watchdog.join()
if reader:
reader.close()
exit(0)
signal.signal(signal.SIGINT, signal_handler)
def start(logger, reader, processor, command):
try:
si = status(logger)
isRunning = running(True)
c = counter(si)
params = config_helper({
"counter": c,
"running": isRunning
})
if issubclass(command.__class__, MultiProcessParameterFactory):
command.populate_parameters(params)
if issubclass(reader.__class__, MultiProcessParameterFactory):
reader.populate_parameters(params)
if issubclass(processor.__class__, MultiProcessParameterFactory):
processor.populate_parameters(params)
watchdog = Watchdog(isRunning)
register_handler(isRunning, watchdog, reader, processor, command)
if reader:
watchdog.start_thread(reader, (isRunning, c, params), "Runner")
if processor:
watchdog.start_thread(ProcessorManager(processor), (isRunning, c, params), "Processor")
if command and reader:
watchdog.start_thread(command, (isRunning, c, params), "Commander")
while isRunning.running:
time.sleep(1)
finally:
storage_cache.close_cache()
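# A minimal invocation sketch; the reader, processor and command objects come
# from the SerialGrabber configuration and are placeholders here:
#
#   start(logging.getLogger('serial_grabber'), reader, processor, command)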
|
gpl-2.0
| -6,709,110,638,634,718,000
| 34.3375
| 99
| 0.686594
| false
| 4.263952
| false
| false
| false
|
Hybrid-Cloud/badam
|
patches_tool/aws_patch/aws_deps/libcloud/compute/drivers/vultr.py
|
1
|
6023
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Vultr Driver
"""
import time
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlencode
from libcloud.common.base import ConnectionKey, JsonResponse
from libcloud.compute.types import Provider, NodeState
from libcloud.common.types import LibcloudError, InvalidCredsError
from libcloud.compute.base import NodeDriver
from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation
class VultrResponse(JsonResponse):
def parse_error(self):
if self.status == httplib.OK:
body = self.parse_body()
return body
elif self.status == httplib.FORBIDDEN:
raise InvalidCredsError(self.body)
else:
raise LibcloudError(self.body)
class VultrConnection(ConnectionKey):
"""
Connection class for the Vultr driver.
"""
host = 'api.vultr.com'
responseCls = VultrResponse
def add_default_params(self, params):
"""
Add parameters that are necessary for every request
This method add ``api_key`` to
the request.
"""
params['api_key'] = self.key
return params
def encode_data(self, data):
return urlencode(data)
def get(self, url):
return self.request(url)
def post(self, url, data):
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
return self.request(url, data=data, headers=headers, method='POST')
class VultrNodeDriver(NodeDriver):
"""
VultrNode node driver.
"""
connectionCls = VultrConnection
type = Provider.VULTR
name = 'Vultr'
website = 'https://www.vultr.com'
NODE_STATE_MAP = {'pending': NodeState.PENDING,
'active': NodeState.RUNNING}
def list_nodes(self):
return self._list_resources('/v1/server/list', self._to_node)
def list_locations(self):
return self._list_resources('/v1/regions/list', self._to_location)
def list_sizes(self):
return self._list_resources('/v1/plans/list', self._to_size)
def list_images(self):
return self._list_resources('/v1/os/list', self._to_image)
def create_node(self, name, size, image, location):
params = {'DCID': location.id, 'VPSPLANID': size.id,
'OSID': image.id, 'label': name}
result = self.connection.post('/v1/server/create', params)
if result.status != httplib.OK:
return False
subid = result.object['SUBID']
retry_count = 3
created_node = None
for i in range(retry_count):
try:
nodes = self.list_nodes()
created_node = [n for n in nodes if n.id == subid][0]
            except IndexError:
                time.sleep(1)
else:
break
return created_node
def reboot_node(self, node):
params = {'SUBID': node.id}
res = self.connection.post('/v1/server/reboot', params)
return res.status == httplib.OK
def destroy_node(self, node):
params = {'SUBID': node.id}
res = self.connection.post('/v1/server/destroy', params)
return res.status == httplib.OK
    def _list_resources(self, url, transform_func):
        data = self.connection.get(url).object
        sorted_key = sorted(data)
        return [transform_func(data[key]) for key in sorted_key]
def _to_node(self, data):
if 'status' in data:
state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN)
if state == NodeState.RUNNING and \
data['power_status'] != 'running':
state = NodeState.STOPPED
else:
state = NodeState.UNKNOWN
if 'main_ip' in data and data['main_ip'] is not None:
public_ips = [data['main_ip']]
else:
public_ips = []
extra_keys = []
extra = {}
for key in extra_keys:
if key in data:
extra[key] = data[key]
node = Node(id=data['SUBID'], name=data['label'], state=state,
public_ips=public_ips, private_ips=None, extra=extra,
driver=self)
return node
def _to_location(self, data):
return NodeLocation(id=data['DCID'], name=data['name'],
country=data['country'], driver=self)
def _to_size(self, data):
extra = {'vcpu_count': int(data['vcpu_count'])}
ram = int(data['ram'])
disk = int(data['disk'])
bandwidth = float(data['bandwidth'])
price = float(data['price_per_month'])
return NodeSize(id=data['VPSPLANID'], name=data['name'],
ram=ram, disk=disk,
bandwidth=bandwidth, price=price,
extra=extra, driver=self)
def _to_image(self, data):
extra = {'arch': data['arch'], 'family': data['family']}
return NodeImage(id=data['OSID'], name=data['name'], extra=extra,
driver=self)
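# A minimal usage sketch (the API key is a placeholder):
#
#   driver = VultrNodeDriver('MY_API_KEY')
#   location = driver.list_locations()[0]
#   size = driver.list_sizes()[0]
#   image = driver.list_images()[0]
#   node = driver.create_node('example', size, image, location)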
|
apache-2.0
| -2,399,194,911,052,198,000
| 30.733696
| 78
| 0.584592
| false
| 4.015333
| false
| false
| false
|
YourCyborg/Sun-RPI
|
src/objects/admin.py
|
1
|
5428
|
#
# This sets up how models are displayed
# in the web admin interface.
#
from django import forms
from django.conf import settings
from django.contrib import admin
from src.objects.models import ObjAttribute, ObjectDB, ObjectNick, Alias
from src.utils.utils import mod_import
class ObjAttributeInline(admin.TabularInline):
model = ObjAttribute
fields = ('db_key', 'db_value')
extra = 0
class NickInline(admin.TabularInline):
model = ObjectNick
fields = ('db_nick', 'db_real', 'db_type')
extra = 0
class AliasInline(admin.TabularInline):
model = Alias
fields = ("db_key",)
extra = 0
class ObjectCreateForm(forms.ModelForm):
"This form details the look of the fields"
class Meta:
model = ObjectDB
db_key = forms.CharField(label="Name/Key",
widget=forms.TextInput(attrs={'size':'78'}),
help_text="Main identifier, like 'apple', 'strong guy', 'Elizabeth' etc. If creating a Character, check so the name is unique among characters!",)
db_typeclass_path = forms.CharField(label="Typeclass",initial="Change to (for example) %s or %s." % (settings.BASE_OBJECT_TYPECLASS, settings.BASE_CHARACTER_TYPECLASS),
widget=forms.TextInput(attrs={'size':'78'}),
help_text="This defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass. If you are creating a Character you should use the typeclass defined by settings.BASE_CHARACTER_TYPECLASS or one derived from that.")
db_permissions = forms.CharField(label="Permissions",
initial=settings.PERMISSION_PLAYER_DEFAULT,
required=False,
widget=forms.TextInput(attrs={'size':'78'}),
help_text="a comma-separated list of text strings checked by certain locks. They are mainly of use for Character objects. Character permissions overload permissions defined on a controlling Player. Most objects normally don't have any permissions defined.")
db_cmdset_storage = forms.CharField(label="CmdSet",
initial=settings.CMDSET_DEFAULT,
required=False,
widget=forms.TextInput(attrs={'size':'78'}),
help_text="Most non-character objects don't need a cmdset and can leave this field blank.")
class ObjectEditForm(ObjectCreateForm):
"Form used for editing. Extends the create one with more fields"
db_lock_storage = forms.CharField(label="Locks",
required=False,
widget=forms.Textarea(attrs={'cols':'100', 'rows':'2'}),
help_text="In-game lock definition string. If not given, defaults will be used. This string should be on the form <i>type:lockfunction(args);type2:lockfunction2(args);...")
class ObjectDBAdmin(admin.ModelAdmin):
list_display = ('id', 'db_key', 'db_location', 'db_player', 'db_typeclass_path')
list_display_links = ('id', 'db_key')
ordering = ['db_player', 'db_typeclass_path', 'id']
search_fields = ['^db_key', 'db_typeclass_path']
save_as = True
save_on_top = True
list_select_related = True
list_filter = ('db_permissions', 'db_location', 'db_typeclass_path')
# editing fields setup
form = ObjectEditForm
fieldsets = (
(None, {
'fields': (('db_key','db_typeclass_path'), ('db_permissions', 'db_lock_storage'),
('db_location', 'db_home'), 'db_destination','db_cmdset_storage'
)}),
)
#deactivated temporarily, they cause empty objects to be created in admin
inlines = [AliasInline]#, ObjAttributeInline]
    # Custom modification to give two different forms depending on whether we are adding or editing.
add_form = ObjectCreateForm
add_fieldsets = (
(None, {
'fields': (('db_key','db_typeclass_path'), 'db_permissions',
('db_location', 'db_home'), 'db_destination','db_cmdset_storage'
)}),
)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super(ObjectDBAdmin, self).get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during creation
"""
defaults = {}
if obj is None:
defaults.update({
'form': self.add_form,
'fields': admin.util.flatten_fieldsets(self.add_fieldsets),
})
defaults.update(kwargs)
return super(ObjectDBAdmin, self).get_form(request, obj, **defaults)
def save_model(self, request, obj, form, change):
if not change:
# adding a new object
obj = obj.typeclass
obj.basetype_setup()
obj.basetype_posthook_setup()
obj.at_object_creation()
obj.at_init()
admin.site.register(ObjectDB, ObjectDBAdmin)
|
bsd-3-clause
| 8,773,561,773,923,069,000
| 42.130081
| 306
| 0.573876
| false
| 4.338929
| false
| false
| false
|
hiatobr/midiacapoeira
|
modules/queries.py
|
1
|
1403
|
# -*- coding: utf-8 -*-
from gluon import current
def tagQuery(tags, ctbl, ttbl, query = 0, op = 'or', field =
'texto_id'):
    '''
    Searches the database for content tagged with the tags in <tags>.
    The operation is recursive, tag by tag, joining the result of each
    search with the result for the previous tag. This join can be an
    intersection (op = 'and') or a union (op = 'or').
    This implementation favours generality, so the function can be used
    to search for any kind of content, as long as the <field> variable
    is filled in correctly when the function is called.
    <ctbl> = content table
    <ttbl> = tag table
    '''
db = current.db
try:
        # Pick a tag and look up the ids of the texts that contain it
tag_ref = db(ttbl.tag==tags.pop()).select(ttbl[field]).as_list()
tag_ref = map(list.pop, map(dict.values, tag_ref))
if query and op == 'or':
return tagQuery(tags, ctbl, ttbl, ctbl.id.belongs(tag_ref) |
query)
elif query and op == 'and':
return tagQuery (tags, ctbl, ttbl,
ctbl.id.belongs(tag_ref) & query)
else:
return tagQuery(tags, ctbl, ttbl, ctbl.id.belongs(tag_ref))
except IndexError:
return db(query).select(ctbl.ALL).as_list()
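# A minimal usage sketch; the table objects are placeholders for the content
# and tag tables defined elsewhere in the application:
#
#   results = tagQuery(['roda', 'berimbau'], db.textos, db.textos_tags,
#                      op='and', field='texto_id')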
|
gpl-3.0
| 3,060,283,172,362,273,000
| 34.435897
| 73
| 0.606368
| false
| 3.098655
| false
| false
| false
|
jsaponara/opentaxforms
|
opentaxforms/serve.py
|
1
|
3082
|
#!/usr/bin/env python
from __future__ import print_function, absolute_import
import flask_restless
from argparse import ArgumentParser
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker
from .db import connect
from .version import appname, apiVersion
from .ut import Bag
def createApi(app,**kw):
db = SQLAlchemy(app)
conn, engine, metadata, md = connect(appname, **kw)
Base = declarative_base()
Session = sessionmaker(autocommit=False, autoflush=False, bind=engine)
mysession = scoped_session(Session)
apimanager = flask_restless.APIManager(app, session=mysession)
counts = {}
for tabl in md:
tablobj = md[tabl]
counts[tabl] = tablobj.count().execute().fetchone()[0]
attrs = dict(
__table__=tablobj,
            # TODO: does flask_restless really need __tablename__?
__tablename__=str(tabl),
)
attrs.update(dict(
orgn=dict(
form=db.relationship('Form'),
),
form=dict(
orgn=db.relationship('Orgn', back_populates='form'),
slot=db.relationship('Slot', back_populates='form'),
),
slot=dict(
form=db.relationship('Form'),
),
)[tabl])
tablcls = type(str(tabl).capitalize(), (Base, ), attrs)
colsToAdd = dict(
orgn=(),
form=(
'orgn', 'orgn.code',
),
slot=(
'form', 'form.code',
),
)[tabl]
colsToShow = [c.name for c in tablobj.columns]
colsToShow.extend(colsToAdd)
# print tabl,colsToShow
apimanager.create_api(
tablcls,
url_prefix='/api/v%s' % (apiVersion, ),
include_columns=colsToShow,
)
return counts
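# With the three tables wired above, createApi exposes REST endpoints such as
# /api/v1/orgn, /api/v1/form and /api/v1/slot (assuming apiVersion is 1; the
# prefix follows url_prefix above).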
def parseCmdline():
'''Load command line arguments'''
parser = ArgumentParser(
description='Automates tax forms'
' and provides an API for new tax form interfaces'
)
parser.add_argument(
'-P', '--postgres',
help='use postgres database [default=sqlite]', action="store_true")
return parser.parse_args()
def createApp(**kw):
cmdline = kw.get('cmdline')
verbose = kw.get('verbose')
if 'cmdline' in kw:
del kw['cmdline']
if 'verbose' in kw:
del kw['verbose']
args = parseCmdline() if cmdline else Bag(dict(postgres=False))
app = Flask(appname)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False # to suppress warning
counts = createApi(app,postgres=args.postgres, **kw)
if verbose:
print('serving {slot} slots in {form} forms from {orgn} orgns'.format(
**counts))
return app
def main(**kw):
app = createApp(dbpath='sqlite:///opentaxforms.sqlite3', **kw)
app.run()
if __name__ == "__main__":
main(cmdline=True, verbose=True)
|
agpl-3.0
| 2,853,290,267,090,020,000
| 30.131313
| 79
| 0.576249
| false
| 3.926115
| false
| false
| false
|
bbglab/wok
|
wok/core/flow/reader.py
|
1
|
6997
|
###############################################################################
#
# Copyright 2009-2011, Universitat Pompeu Fabra
#
# This file is part of Wok.
#
# Wok is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Wok is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses
#
###############################################################################
import os.path
try:
from lxml import etree
except ImportError:
try:
# Python 2.5
import xml.etree.cElementTree as etree
except ImportError:
try:
# Python 2.5+
import xml.etree.ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree
except ImportError:
import sys
sys.stderr.write("Failed to import ElementTree from any known place\n")
raise
from wok.config.data import DataElement, Data
from wok.core.flow.model import *
def str_to_bool(s):
s2b = {
"0" : False, "1" : True,
"no" : False, "yes" : True,
"false" : False, "true" : True}
if s in s2b:
return s2b[s]
else:
return False
class FlowReader(object):
def __init__(self, source):
if isinstance(source, basestring):
self.path = os.path.abspath(source)
self.fp = open(source, "r")
else:
self.path = None
self.fp = source
self.__doc = None
def __read_doc(self):
if self.__doc is None:
self.__doc = etree.parse(self.fp)
return self.__doc
def read_meta(self):
doc = self.__read_doc()
root = doc.getroot()
if root.tag != "flow":
raise Exception("<flow> expected but <{}> found".format(xmle.tag))
name = root.attrib.get("name")
library = root.attrib.get("library")
version = root.attrib.get("version")
return (name, library, version)
def read(self):
doc = self.__read_doc()
root = doc.getroot()
flow = self._parse_flow(root)
if self.path:
flow.path = self.path
return flow
def _parse_base_desc(self, xmle, obj):
if "name" not in xmle.attrib:
raise Exception("'name' attribute not found in tag <{}>".format(xmle.tag))
obj.name = xmle.attrib["name"]
obj.title = xmle.findtext("title")
obj.desc = xmle.findtext("desc")
if "enabled" in xmle:
obj.enabled = str_to_bool(xmle.attr["enabled"])
def _parse_base_port(self, xmle, obj):
self._parse_base_desc(xmle, obj)
if "serializer" in xmle.attrib:
obj.serializer = xmle.attrib["serializer"]
if "wsize" in xmle.attrib:
try:
obj.wsize = int(xmle.attrib["wsize"])
except:
raise Exception("At {} {}: 'wsize' should be a number greater than 0".format(xmle.tag, obj.name))
if obj.wsize < 1:
raise Exception("At {} {}: 'wsize' should be a number greater than 0".format(xmle.tag, obj.name))
def _parse_base_module(self, xmle, obj):
self._parse_base_port(xmle, obj)
if "maxpar" in xmle.attrib:
try:
obj.maxpar = int(xmle.attrib["maxpar"])
except:
raise Exception("At {} {}: 'maxpar' should be a number greater than 0".format(xmle.tag, obj.name))
if obj.maxpar < 1:
raise Exception("At {} {}: 'maxpar' should be a number greater than 0".format(xmle.tag, obj.name))
conf_xml = xmle.find("conf")
if conf_xml is not None:
obj.conf = self._parse_conf(conf_xml)
res_xml = xmle.find("resources")
if res_xml is not None:
obj.resources = self._parse_conf(res_xml)
for x in xmle.findall("param"):
obj.params += [self._parse_param(x)]
for x in xmle.findall("in"):
obj.add_in_port(self._parse_port(x))
for x in xmle.findall("out"):
obj.add_out_port(self._parse_port(x))
def _parse_flow(self, xmle):
if xmle.tag != "flow":
raise Exception("<flow> expected but <{}> found".format(xmle.tag))
flow = Flow(name = None)
self._parse_base_module(xmle, flow)
if "library" in xmle.attrib:
flow.library = xmle.attrib["library"]
if "version" in xmle.attrib:
flow.version = xmle.attrib["version"]
for xmle in xmle.findall("module"):
module = self._parse_module(flow, xmle)
# TODO check that there is no other module with the same name
flow.add_module(module)
return flow
def _parse_module(self, flow, xmle):
mod = Module(name = None)
self._parse_base_module(xmle, mod)
if "depends" in xmle.attrib:
depends = [d.strip() for d in xmle.attrib["depends"].split(",")]
mod.depends = [d for d in depends if len(d) > 0]
exec_xml = xmle.find("exec")
if exec_xml is None:
run_xml = xmle.find("run")
if run_xml is None:
flow_ref_xml = xmle.find("flow")
if flow_ref_xml is None:
raise Exception("Missing either <exec>, <run> or <flow> in module {}".format(mod.name))
else:
mod.flow_ref = self._parse_flow_ref(flow, mod, flow_ref_xml)
else:
mod.execution = self._parse_run(mod, run_xml)
else:
mod.execution = self._parse_exec(exec_xml)
return mod
def _parse_param(self, xmle):
raise Exception("Unimplemented")
def _parse_port(self, xmle):
if xmle.tag == "in":
mode = PORT_MODE_IN
elif xmle.tag == "out":
mode = PORT_MODE_OUT
port = Port(name = None, mode = mode)
self._parse_base_port(xmle, port)
if "link" in xmle.attrib:
link = [x.strip() for x in xmle.attrib["link"].split(",")]
port.link = [l for l in link if len(l) > 0]
return port
def _parse_conf(self, xmle):
return Data.from_xmle(xmle)
def _parse_exec(self, xmle):
execution = Exec()
if "launcher" in xmle.attrib:
execution.mode = xmle.attrib["launcher"].lower()
if execution.mode == "python":
execution.mode = "native"
execution.conf = Data.from_xmle(xmle)
return execution
def _parse_run(self, mod, xmle):
if xmle.text is None or len(xmle.text) == 0:
raise Exception("Missing script name for <run> in module {}".format(mod.name))
execution = Exec()
execution.mode = "native"
execution.conf = DataElement()
execution.conf["script_path"] = xmle.text
return execution
def _parse_flow_ref(self, flow, mod, xmle):
if xmle.text is None or len(xmle.text) == 0:
raise Exception("Missing flow name for <flow> in module {}".format(mod.name))
flow_ref = FlowRef()
pos = xmle.text.rfind(".")
if pos == -1 and flow.library is not None:
flow_ref.canonical_name = "{}.{}".format(flow.library, xmle.text)
else:
flow_ref.canonical_name = xmle.text
if "version" in xmle.attrib:
flow_ref.version = xmle.attrib["version"]
return flow_ref
def close(self):
self.fp.close()
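# Illustrative input sketch (inferred from the parser methods above, not
# taken from project docs): a document this parser accepts might look like
#
#   <flow name="wordcount" library="examples" version="1.0">
#     <module name="split" maxpar="4">
#       <run>split.py</run>
#       <in name="lines" serializer="json" wsize="1"/>
#       <out name="words" link="count.words"/>
#     </module>
#     <module name="count" depends="split">
#       <flow>examples.counter</flow>
#     </module>
#   </flow>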
|
gpl-3.0
| 6,454,725,803,999,862,000
| 26.225681
| 102
| 0.641561
| false
| 3.003004
| false
| false
| false
|
arruda/rmr
|
rmr/apps/accounts/migrations/0001_initial.py
|
1
|
4385
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserProfile'
db.create_table('accounts_userprofile', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('quota', self.gf('django.db.models.fields.DecimalField')(default='0', null=True, max_digits=10, decimal_places=2, blank=True)),
))
db.send_create_signal('accounts', ['UserProfile'])
def backwards(self, orm):
# Deleting model 'UserProfile'
db.delete_table('accounts_userprofile')
models = {
'accounts.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'quota': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts']
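# To apply this migration with South (illustrative invocation, assuming a
# standard manage.py project):
#
#   python manage.py migrate accounts 0001_initial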
|
mit
| 4,301,774,263,361,745,000
| 61.657143
| 182
| 0.560547
| false
| 3.76072
| false
| false
| false
|
Azure/azure-sdk-for-python
|
sdk/keyvault/azure-keyvault-keys/samples/backup_restore_operations_async.py
|
1
|
3785
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import asyncio
import os
from azure.keyvault.keys.aio import KeyClient
from azure.identity.aio import DefaultAzureCredential
from azure.core.exceptions import HttpResponseError
# ----------------------------------------------------------------------------------------------------------
# Prerequisites:
# 1. An Azure Key Vault (https://docs.microsoft.com/en-us/azure/key-vault/quick-create-cli)
#
# 2. azure-keyvault-keys and azure-identity libraries (pip install these)
#
# 3. Set Environment variables AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET, VAULT_URL
# (See https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/keyvault/azure-keyvault-keys#authenticate-the-client)
#
# ----------------------------------------------------------------------------------------------------------
# Sample - demonstrates the basic backup and restore operations on a vault(key) resource for Azure Key Vault
#
# 1. Create a key (create_key)
#
# 2. Backup a key (backup_key)
#
# 3. Delete a key (delete_key)
#
# 4. Purge a key (purge_deleted_key)
#
# 5. Restore a key (restore_key_backup)
# ----------------------------------------------------------------------------------------------------------
async def run_sample():
# Instantiate a key client that will be used to call the service.
# Notice that the client is using default Azure credentials.
# To make default credentials work, ensure that environment variables 'AZURE_CLIENT_ID',
# 'AZURE_CLIENT_SECRET' and 'AZURE_TENANT_ID' are set with the service principal credentials.
VAULT_URL = os.environ["VAULT_URL"]
credential = DefaultAzureCredential()
client = KeyClient(vault_url=VAULT_URL, credential=credential)
try:
# Let's create a Key of type RSA.
# if the key already exists in the Key Vault, then a new version of the key is created.
print("\n.. Create Key")
key = await client.create_key("keyName", "RSA")
print("Key with name '{0}' created with key type '{1}'".format(key.name, key.key_type))
# Backups are good to have in case keys get deleted accidentally.
# For long term storage, it is ideal to write the backup to a file.
print("\n.. Create a backup for an existing Key")
key_backup = await client.backup_key(key.name)
print("Backup created for key with name '{0}'.".format(key.name))
# The rsa key is no longer in use, so you delete it.
deleted_key = await client.delete_key(key.name)
print("Deleted key with name '{0}'".format(deleted_key.name))
# Purge the deleted key.
# The purge will take some time, so wait before restoring the backup to avoid a conflict.
print("\n.. Purge the key")
await client.purge_deleted_key(key.name)
await asyncio.sleep(60)
print("Purged key with name '{0}'".format(deleted_key.name))
# In the future, if the key is required again, we can use the backup value to restore it in the Key Vault.
print("\n.. Restore the key using the backed up key bytes")
key = await client.restore_key_backup(key_backup)
print("Restored key with name '{0}'".format(key.name))
except HttpResponseError as e:
print("\nrun_sample has caught an error. {0}".format(e.message))
finally:
print("\nrun_sample done")
await credential.close()
await client.close()
if __name__ == "__main__":
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(run_sample())
loop.close()
except Exception as e:
print("Top level Error: {0}".format(str(e)))
|
mit
| -6,305,333,877,761,251,000
| 43.011628
| 123
| 0.60317
| false
| 3.988409
| false
| false
| false
|
masschallenge/django-accelerator
|
accelerator/tests/contexts/judge_feedback_context.py
|
1
|
13749
|
from accelerator_abstract.models import (
FORM_ELEM_FEEDBACK_TO_MC,
FORM_ELEM_FEEDBACK_TO_STARTUP,
FORM_ELEM_OVERALL_RECOMMENDATION,
)
from accelerator.models import (
ACTIVE_PROGRAM_STATUS,
ASSIGNED_PANEL_ASSIGNMENT_STATUS,
COMPLETE_PANEL_ASSIGNMENT_STATUS,
FEEDBACK_DISPLAY_DISABLED as DISABLED,
FEEDBACK_DISPLAY_ENABLED as ENABLED,
IN_PERSON_JUDGING_ROUND_TYPE,
ONLINE_JUDGING_ROUND_TYPE,
JUDGING_FEEDBACK_STATUS_INCOMPLETE as INCOMPLETE,
PREVIEW_PANEL_STATUS,
SUBMITTED_APP_STATUS,
UserRole,
)
from accelerator.tests.factories import (
ApplicationAnswerFactory,
ApplicationFactory,
ApplicationPanelAssignmentFactory,
ExpertFactory,
JudgeApplicationFeedbackFactory,
JudgeFeedbackComponentFactory,
JudgePanelAssignmentFactory,
JudgeRoundCommitmentFactory,
JudgingFormElementFactory,
PanelFactory,
ProgramCycleFactory,
ProgramRoleFactory,
ProgramRoleGrantFactory,
ScenarioFactory,
StartupCycleInterestFactory,
StartupProgramInterestFactory,
)
from accelerator.tests.contexts.context_utils import get_user_role_by_name
from .judging_round_context import JudgingRoundContext
ELEMENT_NAMES = [
FORM_ELEM_OVERALL_RECOMMENDATION,
FORM_ELEM_FEEDBACK_TO_STARTUP,
FORM_ELEM_FEEDBACK_TO_MC,
]
_round_type = {True: ONLINE_JUDGING_ROUND_TYPE,
False: IN_PERSON_JUDGING_ROUND_TYPE}
class JudgeFeedbackContext:
def __init__(self,
application=None,
num_components=1,
complete=True,
panel_status=PREVIEW_PANEL_STATUS,
display_feedback=False,
merge_feedback_with=None,
cycle_based_round=False,
online_round=True,
is_active=True,
judge_capacity=10,
program_status=ACTIVE_PROGRAM_STATUS):
self.judging_capacity = 0
if application:
self.application = application
self.cycle = application.cycle
else:
self.cycle = ProgramCycleFactory()
self.application = ApplicationFactory(
application_status=SUBMITTED_APP_STATUS,
application_type=self.cycle.default_application_type,
cycle=self.cycle)
self.application_type = self.application.application_type
self.applications = [self.application]
self.startup = self.application.startup
self.industry = self.startup.primary_industry
feedback_display = ENABLED if display_feedback else DISABLED
jr_kwargs = {
'program__cycle': self.cycle,
'round_type': _round_type[online_round],
'feedback_display': feedback_display,
'cycle_based_round': cycle_based_round,
'application_type': self.application_type,
'is_active': False,
'program__program_status': program_status,
}
if merge_feedback_with:
jr_kwargs['feedback_merge_with'] = merge_feedback_with
self.judging_round = JudgingRoundContext(**jr_kwargs).judging_round
self.program = self.judging_round.program
self.panel = PanelFactory(status=panel_status,
panel_time__judging_round=self.judging_round)
self.scenario = ScenarioFactory(judging_round=self.judging_round)
user_role = get_user_role_by_name(UserRole.JUDGE)
self.judge_role = ProgramRoleFactory(program=self.program,
user_role=user_role)
self.judges = []
self.judge = self.add_judge(complete=complete,
capacity=judge_capacity)
self.feedback = JudgeApplicationFeedbackFactory(
judge=self.judge,
application=self.application,
panel=self.panel,
form_type=self.judging_round.judging_form)
self.judging_form = self.feedback.form_type
self.application_assignment = ApplicationPanelAssignmentFactory(
application=self.application,
panel=self.panel,
scenario=self.scenario)
cycle_interest = StartupCycleInterestFactory(cycle=self.program.cycle,
startup=self.startup)
StartupProgramInterestFactory(program=self.program,
startup=self.startup,
startup_cycle_interest=cycle_interest,
applying=True,
order=1)
self.components = []
self.elements = []
self.application_questions = []
self.application_answers = []
for element_name in ELEMENT_NAMES:
self.add_component(element_name=element_name)
if complete:
self.feedback.save()
for _ in range(num_components):
self.add_component()
else:
for _ in range(num_components):
self.add_element()
self.judging_round.is_active = is_active
self.judging_round.save()
def add_application_answer(self, question=None, answer_text=None):
question = question or self.application_questions[0]
kwargs = {"application_question": question,
"application": self.application}
if answer_text:
kwargs["answer_text"] = answer_text
app_answer = ApplicationAnswerFactory(**kwargs)
self.application_answers.append(app_answer)
return app_answer
def add_component(self, element_name=None,
feedback_element=None,
add_answer=True,
answer_text=None):
factory_params = {
"judge_feedback": self.feedback, }
if feedback_element is None:
app_type_key = "__".join(["feedback_element",
"application_question",
"application_type"])
factory_params.update(
{
"feedback_element__form_type": self.judging_form,
"feedback_element__element_type": "feedback",
"feedback_element__mandatory": True,
"feedback_element__sharing": "share-with-startup",
app_type_key: self.application_type}
)
if element_name:
factory_params['feedback_element__element_name'] = element_name
else:
factory_params.update({"feedback_element": feedback_element})
if answer_text:
factory_params["answer_text"] = answer_text
component = JudgeFeedbackComponentFactory(
**factory_params)
self.components.append(component)
question = component.feedback_element.application_question
self.application_questions.append(question)
if add_answer:
app_answer = ApplicationAnswerFactory(
application_question=question,
application=self.application)
self.application_answers.append(app_answer)
if feedback_element is None:
self.elements.append(component.feedback_element)
self.feedback.save()
return component
def add_element(self,
feedback_type="",
element_type="feedback",
choice_layout="",
mandatory=True,
text_minimum=0,
text_minimum_units="",
answer_text=None,
text_limit=0,
text_limit_units=""):
element = JudgingFormElementFactory(
form_type=self.judging_form,
mandatory=mandatory,
element_type=element_type,
feedback_type=feedback_type,
choice_layout=choice_layout,
sharing="share-with-startup",
application_question__application_type=self.application_type,
text_minimum=text_minimum,
text_minimum_units=text_minimum_units,
text_limit=text_limit,
text_limit_units=text_limit_units,
)
application_question = element.application_question
self.application_questions.append(application_question)
answer_kwargs = {"application_question": application_question,
"application": self.application}
if answer_text:
answer_kwargs["answer_text"] = answer_text
application_answer = ApplicationAnswerFactory(**answer_kwargs)
self.application_answers.append(application_answer)
self.elements.append(element)
self.feedback.save()
return element
def add_extra_scenario(self):
return ScenarioFactory(judging_round=self.judging_round)
def add_panel(self):
return PanelFactory(
panel_time__judging_round=self.judging_round,
panel_type__judging_round=self.judging_round,
location__judging_round=self.judging_round)
def add_assignment(self,
judge=None,
panel=None,
scenario=None):
scenario = scenario or self.scenario
judge = judge or self.judge
panel = panel or self.panel
return JudgePanelAssignmentFactory(
judge=judge,
panel=panel,
scenario=scenario)
def add_feedback(self,
application=None,
judge=None,
panel=None,
feedback_status=INCOMPLETE):
judge = judge or self.judge
application = application or self.application
panel = panel or self.panel
if not panel.applicationpanelassignment_set.filter(
application=application).exists():
ApplicationPanelAssignmentFactory(
application=application,
panel=panel,
scenario=self.scenario)
return JudgeApplicationFeedbackFactory(
feedback_status=feedback_status,
judge=judge,
application=application,
panel=panel,
form_type=self.judging_round.judging_form)
def add_application(self,
application=None,
field=None,
option=None,
program=None):
program = program or self.program
if application is None:
fields = {
"application_status": SUBMITTED_APP_STATUS,
"application_type": self.application_type,
}
if field:
fields[field] = option
application = ApplicationFactory(**fields)
self.applications.append(application)
startup = application.startup
cycle_interest = StartupCycleInterestFactory(cycle=program.cycle,
startup=startup)
StartupProgramInterestFactory(program=program,
startup=startup,
startup_cycle_interest=cycle_interest,
applying=True,
order=1)
return application
def add_applications(self, count, field=None, options=(), programs=()):
result = []
option_count = len(options)
option = None
program_count = len(programs)
program = None
for i in range(count):
if option_count > 0:
option = options[i % option_count]
if program_count > 0:
program = programs[i % program_count]
result.append(self.add_application(field=field,
option=option,
program=program))
return result
def add_judge(self,
assigned=True,
complete=True,
judge=None,
panel=None,
capacity=10):
if judge is None:
judge = ExpertFactory(
profile__primary_industry=self.industry,
profile__home_program_family=self.program.program_family)
ProgramRoleGrantFactory(person=judge, program_role=self.judge_role)
self.judging_round.confirmed_judge_label.users.add(judge)
JudgeRoundCommitmentFactory(judging_round=self.judging_round,
judge=judge,
capacity=capacity,
commitment_state=True)
self.judging_capacity += capacity
if assigned:
if complete:
status = COMPLETE_PANEL_ASSIGNMENT_STATUS
else:
status = ASSIGNED_PANEL_ASSIGNMENT_STATUS
JudgePanelAssignmentFactory(
judge=judge,
assignment_status=status,
panel=panel or self.panel,
scenario=self.scenario)
self.judges.append(judge)
return judge
@classmethod
def create_batch(cls, qty, *args, **kwargs):
if 'merge_feedback' in kwargs:
merge_feedback = kwargs.pop('merge_feedback')
else:
merge_feedback = False
contexts = [cls(*args, **kwargs)]
if merge_feedback:
kwargs['merge_feedback_with'] = contexts[0].judging_round
for _ in range(1, qty):
contexts.append(cls(*args, **kwargs))
return contexts
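# Illustrative test usage (argument values are assumptions):
#
#   context = JudgeFeedbackContext(num_components=2, complete=False)
#   judge = context.add_judge(complete=False)
#   feedback = context.add_feedback(judge=judge)
#   assert feedback.form_type == context.judging_form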
|
mit
| 4,584,632,868,787,122,000
| 38.852174
| 79
| 0.56695
| false
| 4.558687
| false
| false
| false
|
superbatlc/dtailweb
|
phonegroups/migrations/0001_initial.py
|
1
|
1870
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('systems', '0001_initial'),
('calls', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Phonegroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255, verbose_name=b'Nome')),
('code', models.CharField(max_length=10, verbose_name=b'Codice')),
('parent', models.ForeignKey(related_name='child_phonegroup_set', blank=True, to='phonegroups.Phonegroup', help_text=b'The father of this group', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PhonegroupCall',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('call', models.ForeignKey(to='calls.Call')),
('phonegroup', models.ForeignKey(to='phonegroups.Phonegroup')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PhonegroupExtension',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('extension', models.CharField(max_length=4)),
('phonegroup', models.ForeignKey(to='phonegroups.Phonegroup')),
('system', models.ForeignKey(to='systems.System')),
],
options={
},
bases=(models.Model,),
),
]
|
gpl-2.0
| -5,535,782,326,201,843,000
| 36.4
| 174
| 0.536364
| false
| 4.452381
| false
| false
| false
|
hiraditya/fool
|
tensorflow/scaling-up-ml-using-cmle.py
|
1
|
6449
|
'''
In this lab, you will learn how to:
Package up TensorFlow model
Run training locally
Run training on cloud
Deploy model to cloud
Invoke model to carry out predictions
'''
'''
Scaling up ML using Cloud ML Engine
In this notebook, we take a previously developed TensorFlow model to predict taxifare rides and package it up so that it can be run in Cloud MLE. For now, we'll run this on a small dataset. The model that was developed is rather simplistic, and therefore, the accuracy of the model is not great either. However, this notebook illustrates how to package up a TensorFlow model to run it within Cloud ML.
Later in the course, we will look at ways to make a more effective machine learning model.
Environment variables for project and bucket
Note that:
Your project id is the unique string that identifies your project (not the project name). You can find this from the GCP Console dashboard's Home page. My dashboard reads: Project ID: cloud-training-demos
Cloud training often involves saving and restoring model files. If you don't have a bucket already, I suggest that you create one from the GCP console (because it will dynamically check whether the bucket name you want is available). A common pattern is to prefix the bucket name by the project id, so that it is unique. Also, for cost reasons, you might want to use a single region bucket.
Change the cell below to reflect your Project ID and bucket name.
'''
import os
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# for bash
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = '1.7' # Tensorflow version
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
PROJECT_ID=$PROJECT
AUTH_TOKEN=$(gcloud auth print-access-token)
SVC_ACCOUNT=$(curl -X GET -H "Content-Type: application/json" \
-H "Authorization: Bearer $AUTH_TOKEN" \
https://ml.googleapis.com/v1/projects/${PROJECT_ID}:getConfig \
| python -c "import json; import sys; response = json.load(sys.stdin); \
print response['serviceAccount']")
echo "Authorizing the Cloud ML Service account $SVC_ACCOUNT to access files in $BUCKET"
gsutil -m defacl ch -u $SVC_ACCOUNT:R gs://$BUCKET
gsutil -m acl ch -u $SVC_ACCOUNT:R -r gs://$BUCKET # error message (if bucket is empty) can be ignored
gsutil -m acl ch -u $SVC_ACCOUNT:W gs://$BUCKET
'''
Packaging up the code
Take your code and put it into a standard Python package structure. model.py and task.py contain the Tensorflow code from earlier (explore the directory structure).
'''
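'''
Assumed layout of the package (an illustration; the next cell lists the
actual files):
taxifare/
  trainer/
    __init__.py
    model.py  -- input functions, feature columns, and the train/evaluate loop
    task.py   -- argument parsing; the entry point invoked as trainer.task
'''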
!find taxifare
!cat taxifare/trainer/model.py
'''
Find absolute paths to your data
Note the absolute paths below. /content is mapped in Datalab to where the home icon takes you
'''
%%bash
echo $PWD
rm -rf $PWD/taxi_trained
head -1 $PWD/taxi-train.csv
head -1 $PWD/taxi-valid.csv
'''
Running the Python module from the command-line
'''
%%bash
rm -rf taxifare.tar.gz taxi_trained
export PYTHONPATH=${PYTHONPATH}:${PWD}/taxifare
python -m trainer.task \
--train_data_paths="${PWD}/taxi-train*" \
--eval_data_paths=${PWD}/taxi-valid.csv \
--output_dir=${PWD}/taxi_trained \
--train_steps=1000 --job-dir=./tmp
%%bash
ls $PWD/taxi_trained/export/exporter/
%%writefile ./test.json
{"pickuplon": -73.885262,"pickuplat": 40.773008,"dropofflon": -73.987232,"dropofflat": 40.732403,"passengers": 2}
%%bash
model_dir=$(ls ${PWD}/taxi_trained/export/exporter)
gcloud ml-engine local predict \
--model-dir=${PWD}/taxi_trained/export/exporter/${model_dir} \
--json-instances=./test.json
'''
Running locally using gcloud
'''
%%bash
rm -rf taxifare.tar.gz taxi_trained
gcloud ml-engine local train \
--module-name=trainer.task \
--package-path=${PWD}/taxifare/trainer \
-- \
--train_data_paths=${PWD}/taxi-train.csv \
--eval_data_paths=${PWD}/taxi-valid.csv \
--train_steps=1000 \
--output_dir=${PWD}/taxi_trained
'''
When I ran it (due to random seeds, your results will be different), the average_loss (Mean Squared Error) on the evaluation dataset was 187, meaning that the RMSE was around 13.
'''
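'''
Sanity check of that relationship (RMSE is the square root of MSE):
'''
import math
print math.sqrt(187)  # ~13.67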
from google.datalab.ml import TensorBoard
TensorBoard().start('./taxi_trained')
for pid in TensorBoard.list()['pid']:
TensorBoard().stop(pid)
print 'Stopped TensorBoard with pid {}'.format(pid)
'''
If the above step (to stop TensorBoard) appears stalled, just move on to the next step. You don't need to wait for it to return.
'''
!ls $PWD/taxi_trained
'''
Submit training job using gcloud
First copy the training data to the cloud. Then, launch a training job.
After you submit the job, go to the cloud console (http://console.cloud.google.com) and select Machine Learning | Jobs to monitor progress.
Note: Don't be concerned if the notebook stalls (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud. Use the Cloud Console link (above) to monitor the job.
https://cloud.google.com/ml-engine/docs/tensorflow/getting-started-training-prediction
'''
%%bash
echo $BUCKET
gsutil -m rm -rf gs://${BUCKET}/taxifare/smallinput/
gsutil -m cp ${PWD}/*.csv gs://${BUCKET}/taxifare/smallinput/
%%bash
OUTDIR=gs://${BUCKET}/taxifare/smallinput/taxi_trained
JOBNAME=lab3a_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=${PWD}/taxifare/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC \
--runtime-version=$TFVERSION \
-- \
--train_data_paths="gs://${BUCKET}/taxifare/smallinput/taxi-train*" \
--eval_data_paths="gs://${BUCKET}/taxifare/smallinput/taxi-valid*" \
--output_dir=$OUTDIR \
--train_steps=10000
Job [lab3a_180607_192245] submitted successfully.
Your job is still active. You may view the status of your job with the command (on google cloud console)
$ gcloud ml-engine jobs describe lab3a_180607_192245
or continue streaming the logs with the command
$ gcloud ml-engine jobs stream-logs lab3a_180607_192245
Use the Cloud Console link to monitor the job and do NOT proceed until the job is done.
|
mit
| 2,726,381,697,759,011,000
| 36.71345
| 401
| 0.728485
| false
| 3.317387
| false
| false
| false
|
wangyixiaohuihui/spark2-annotation
|
python/pyspark/mllib/stat/_statistics.py
|
1
|
13703
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version >= '3':
basestring = str
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper
from pyspark.mllib.linalg import Matrix, _convert_to_vector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.stat.test import ChiSqTestResult, KolmogorovSmirnovTestResult
__all__ = ['MultivariateStatisticalSummary', 'Statistics']
class MultivariateStatisticalSummary(JavaModelWrapper):
"""
Trait for multivariate statistical summary of a data matrix.
"""
def mean(self):
return self.call("mean").toArray()
def variance(self):
return self.call("variance").toArray()
def count(self):
return int(self.call("count"))
def numNonzeros(self):
return self.call("numNonzeros").toArray()
def max(self):
return self.call("max").toArray()
def min(self):
return self.call("min").toArray()
def normL1(self):
return self.call("normL1").toArray()
def normL2(self):
return self.call("normL2").toArray()
class Statistics(object):
@staticmethod
def colStats(rdd):
"""
Computes column-wise summary statistics for the input RDD[Vector].
:param rdd: an RDD[Vector] for which column-wise summary statistics
are to be computed.
:return: :class:`MultivariateStatisticalSummary` object containing
column-wise summary statistics.
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([2, 0, 0, -2]),
... Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8])])
>>> cStats = Statistics.colStats(rdd)
>>> cStats.mean()
array([ 4., 4., 0., 3.])
>>> cStats.variance()
array([ 4., 13., 0., 25.])
>>> cStats.count()
3
>>> cStats.numNonzeros()
array([ 3., 2., 0., 3.])
>>> cStats.max()
array([ 6., 7., 0., 8.])
>>> cStats.min()
array([ 2., 0., 0., -2.])
"""
cStats = callMLlibFunc("colStats", rdd.map(_convert_to_vector))
return MultivariateStatisticalSummary(cStats)
@staticmethod
def corr(x, y=None, method=None):
"""
Compute the correlation (matrix) for the input RDD(s) using the
specified method.
Methods currently supported: I{pearson (default), spearman}.
If a single RDD of Vectors is passed in, a correlation matrix
comparing the columns in the input RDD is returned. Use C{method=}
to specify the method to be used for single RDD input.
If two RDDs of floats are passed in, a single float is returned.
:param x: an RDD of vector for which the correlation matrix is to be computed,
or an RDD of float of the same cardinality as y when y is specified.
:param y: an RDD of float of the same cardinality as x.
:param method: String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`
:return: Correlation matrix comparing columns in x.
>>> x = sc.parallelize([1.0, 0.0, -2.0], 2)
>>> y = sc.parallelize([4.0, 5.0, 3.0], 2)
>>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2)
>>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7
True
>>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson")
True
>>> Statistics.corr(x, y, "spearman")
0.5
>>> from math import isnan
>>> isnan(Statistics.corr(x, zeros))
True
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])])
>>> pearsonCorr = Statistics.corr(rdd)
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
[[ 1. 0.05564149 NaN 0.40047142]
[ 0.05564149 1. NaN 0.91359586]
[ NaN NaN 1. NaN]
[ 0.40047142 0.91359586 NaN 1. ]]
>>> spearmanCorr = Statistics.corr(rdd, method="spearman")
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
[[ 1. 0.10540926 NaN 0.4 ]
[ 0.10540926 1. NaN 0.9486833 ]
[ NaN NaN 1. NaN]
[ 0.4 0.9486833 NaN 1. ]]
>>> try:
... Statistics.corr(rdd, "spearman")
... print("Method name as second argument without 'method=' shouldn't be allowed.")
... except TypeError:
... pass
"""
# Check inputs to determine whether a single value or a matrix is needed for output.
# Since it's legal for users to use the method name as the second argument, we need to
# check if y is used to specify the method name instead.
if type(y) == str:
raise TypeError("Use 'method=' to specify method name.")
if not y:
return callMLlibFunc("corr", x.map(_convert_to_vector), method).toArray()
else:
return callMLlibFunc("corr", x.map(float), y.map(float), method)
@staticmethod
@ignore_unicode_prefix
def chiSqTest(observed, expected=None):
"""
If `observed` is Vector, conduct Pearson's chi-squared goodness
of fit test of the observed data against the expected distribution,
or against the uniform distribution (by default), with each category
having an expected frequency of `1 / len(observed)`.
If `observed` is matrix, conduct Pearson's independence test on the
input contingency matrix, which cannot contain negative entries or
columns or rows that sum up to 0.
If `observed` is an RDD of LabeledPoint, conduct Pearson's independence
test for every feature against the label across the input RDD.
For each feature, the (feature, label) pairs are converted into a
contingency matrix for which the chi-squared statistic is computed.
All label and feature values must be categorical.
.. note:: `observed` cannot contain negative values
:param observed: it could be a vector containing the observed categorical
counts/relative frequencies, or the contingency matrix
(containing either counts or relative frequencies),
or an RDD of LabeledPoint containing the labeled dataset
with categorical features. Real-valued features will be
treated as categorical for each distinct value.
:param expected: Vector containing the expected categorical counts/relative
frequencies. `expected` is rescaled if the `expected` sum
differs from the `observed` sum.
:return: ChiSquaredTest object containing the test statistic, degrees
of freedom, p-value, the method used, and the null hypothesis.
>>> from pyspark.mllib.linalg import Vectors, Matrices
>>> observed = Vectors.dense([4, 6, 5])
>>> pearson = Statistics.chiSqTest(observed)
>>> print(pearson.statistic)
0.4
>>> pearson.degreesOfFreedom
2
>>> print(round(pearson.pValue, 4))
0.8187
>>> pearson.method
u'pearson'
>>> pearson.nullHypothesis
u'observed follows the same distribution as expected.'
>>> observed = Vectors.dense([21, 38, 43, 80])
>>> expected = Vectors.dense([3, 5, 7, 20])
>>> pearson = Statistics.chiSqTest(observed, expected)
>>> print(round(pearson.pValue, 4))
0.0027
>>> data = [40.0, 24.0, 29.0, 56.0, 32.0, 42.0, 31.0, 10.0, 0.0, 30.0, 15.0, 12.0]
>>> chi = Statistics.chiSqTest(Matrices.dense(3, 4, data))
>>> print(round(chi.statistic, 4))
21.9958
>>> data = [LabeledPoint(0.0, Vectors.dense([0.5, 10.0])),
... LabeledPoint(0.0, Vectors.dense([1.5, 20.0])),
... LabeledPoint(1.0, Vectors.dense([1.5, 30.0])),
... LabeledPoint(0.0, Vectors.dense([3.5, 30.0])),
... LabeledPoint(0.0, Vectors.dense([3.5, 40.0])),
... LabeledPoint(1.0, Vectors.dense([3.5, 40.0])),]
>>> rdd = sc.parallelize(data, 4)
>>> chi = Statistics.chiSqTest(rdd)
>>> print(chi[0].statistic)
0.75
>>> print(chi[1].statistic)
1.5
"""
if isinstance(observed, RDD):
if not isinstance(observed.first(), LabeledPoint):
raise ValueError("observed should be an RDD of LabeledPoint")
jmodels = callMLlibFunc("chiSqTest", observed)
return [ChiSqTestResult(m) for m in jmodels]
if isinstance(observed, Matrix):
jmodel = callMLlibFunc("chiSqTest", observed)
else:
if expected and len(expected) != len(observed):
raise ValueError("`expected` should have same length with `observed`")
jmodel = callMLlibFunc("chiSqTest", _convert_to_vector(observed), expected)
return ChiSqTestResult(jmodel)
@staticmethod
@ignore_unicode_prefix
def kolmogorovSmirnovTest(data, distName="norm", *params):
"""
Performs the Kolmogorov-Smirnov (KS) test for data sampled from
a continuous distribution. It tests the null hypothesis that
the data is generated from a particular distribution.
The given data is sorted and the Empirical Cumulative
Distribution Function (ECDF) is calculated
which for a given point is the number of points having a CDF
value lesser than it divided by the total number of points.
Since the data is sorted, this is a step function
that rises by (1 / length of data) for every ordered point.
The KS statistic gives us the maximum distance between the
ECDF and the CDF. Intuitively if this statistic is large, the
probability that the null hypothesis is true becomes small.
For specific details of the implementation, please have a look
at the Scala documentation.
:param data: RDD, samples from the data
:param distName: string, currently only "norm" is supported.
(Normal distribution) to calculate the
theoretical distribution of the data.
:param params: additional values which need to be provided for
a certain distribution.
If not provided, the default values are used.
:return: KolmogorovSmirnovTestResult object containing the test
statistic, degrees of freedom, p-value,
the method used, and the null hypothesis.
>>> kstest = Statistics.kolmogorovSmirnovTest
>>> data = sc.parallelize([-1.0, 0.0, 1.0])
>>> ksmodel = kstest(data, "norm")
>>> print(round(ksmodel.pValue, 3))
1.0
>>> print(round(ksmodel.statistic, 3))
0.175
>>> ksmodel.nullHypothesis
u'Sample follows theoretical distribution'
>>> data = sc.parallelize([2.0, 3.0, 4.0])
>>> ksmodel = kstest(data, "norm", 3.0, 1.0)
>>> print(round(ksmodel.pValue, 3))
1.0
>>> print(round(ksmodel.statistic, 3))
0.175
"""
if not isinstance(data, RDD):
raise TypeError("data should be an RDD, got %s." % type(data))
if not isinstance(distName, basestring):
raise TypeError("distName should be a string, got %s." % type(distName))
params = [float(param) for param in params]
return KolmogorovSmirnovTestResult(
callMLlibFunc("kolmogorovSmirnovTest", data, distName, params))
def _test():
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("mllib.stat.statistics tests")\
.getOrCreate()
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
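# Illustrative pure-Python sketch of the KS statistic described in the
# kolmogorovSmirnovTest docstring (assumes a standard normal CDF via
# math.erf; this helper is not part of the pyspark API):
#
#   import math
#
#   def ks_statistic(samples):
#       xs = sorted(samples)
#       n = float(len(xs))
#       cdf = lambda v: 0.5 * (1.0 + math.erf(v / math.sqrt(2.0)))
#       # The ECDF steps by 1/n at each ordered point; the KS statistic is
#       # the largest gap between the ECDF and the CDF at either step edge.
#       return max(max(abs((i + 1) / n - cdf(x)), abs(i / n - cdf(x)))
#                  for i, x in enumerate(xs))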
|
apache-2.0
| 1,669,562,013,303,584,300
| 40.821875
| 95
| 0.583157
| false
| 4.06376
| true
| false
| false
|
googleapis/googleapis-gen
|
google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/enums/types/lead_form_desired_intent.py
|
1
|
1209
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.enums',
marshal='google.ads.googleads.v6',
manifest={
'LeadFormDesiredIntentEnum',
},
)
class LeadFormDesiredIntentEnum(proto.Message):
r"""Describes the desired level of intent of generated leads. """
class LeadFormDesiredIntent(proto.Enum):
r"""Enum describing the desired level of intent of generated
leads.
"""
UNSPECIFIED = 0
UNKNOWN = 1
LOW_INTENT = 2
HIGH_INTENT = 3
__all__ = tuple(sorted(__protobuf__.manifest))
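# Illustrative usage (import path assumed from the generated package layout):
#
#   from google.ads.googleads.v6.enums.types import lead_form_desired_intent
#   Intent = lead_form_desired_intent.LeadFormDesiredIntentEnum.LeadFormDesiredIntent
#   assert Intent.HIGH_INTENT == 3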
|
apache-2.0
| -8,953,890,120,698,834,000
| 29.225
| 74
| 0.687345
| false
| 3.88746
| false
| false
| false
|
praekelt/jmbo-twitter
|
jmbo_twitter/models.py
|
1
|
5204
|
import datetime, twitter
from urllib2 import URLError
import logging
from django.db import models
from django.core.cache import cache
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from jmbo.models import ModelBase
logger = logging.getLogger('django')
class Status(ModelBase):
"""Purely a wrapper that allows us to use jmbo-foundry's listings for
tweets."""
def __init__(self, status):
# Copy attributes over
attrs = ('contributors', 'coordinates', 'created_at', \
'created_at_in_seconds', 'favorited', 'geo', 'hashtags', 'id', \
'in_reply_to_screen_name', 'in_reply_to_status_id', \
'in_reply_to_user_id', 'location', 'now', 'place', \
'relative_created_at', 'retweet_count', 'retweeted', \
'retweeted_status', 'source', 'text', 'truncated', 'urls', 'user', \
'user_mentions', 'created_at_datetime')
for attr in attrs:
setattr(self, attr, getattr(status, attr))
@property
def as_leaf_class(self):
return self
def save(self):
raise NotImplementedError
class StatusMixin(object):
def get_statuses(self, api):
raise NotImplementedError
def fetch(self, force=False):
klass_name = self.__class__.__name__
cache_key = 'jmbo_twitter_%s_%s' % (klass_name, self.id)
cached = cache.get(cache_key, None)
if (cached is not None) and not force:
return cached
# Get and check settings
di = getattr(settings, 'JMBO_TWITTER', {})
ck = di.get('consumer_key')
cs = di.get('consumer_secret')
atk = di.get('access_token_key')
ats = di.get('access_token_secret')
if not all([ck, cs, atk, ats]):
logger.error(
'jmbo_twitter.models.%s.fetch - incomplete settings' \
% klass_name
)
return []
# Query twitter taking care to handle network errors
api = twitter.Api(
consumer_key=ck, consumer_secret=cs, access_token_key=atk,
access_token_secret=ats, requests_timeout=10
)
try:
statuses = self.get_statuses(api)
except (URLError, ValueError, twitter.TwitterError):
statuses = []
except Exception, e:
# All manner of things can go wrong with integration
logger.error(
'jmbo_twitter.models.%s.fetch - %s' % (klass_name, e.message)
)
statuses = []
for status in statuses:
status.created_at_datetime = datetime.datetime.fromtimestamp(
status.created_at_in_seconds
)
if statuses:
# Only set if there are statuses. Twitter may randomly throttle us
# and destroy our cache without this check. Cache for a long time
# in case Twitter goes down.
cache.set(cache_key, statuses, 86400)
# Legacy return
return statuses
@property
def fetched(self):
klass_name = self.__class__.__name__
cache_key = 'jmbo_twitter_%s_%s' % (klass_name, self.id)
return cache.get(cache_key, [])
@property
def tweets(self):
class MyList(list):
"""Slightly emulate QuerySet API so jmbo-foundry listings work"""
@property
def exists(self):
return len(self) > 0
result = []
for status in self.fetched:
result.append(Status(status))
return MyList(result)
class Feed(ModelBase, StatusMixin):
"""A feed represents a twitter user account"""
name = models.CharField(
max_length=255,
unique=True,
help_text="A twitter account name, eg. johnsmith"
)
profile_image_url = models.CharField(
null=True, editable=False, max_length=255
)
twitter_id = models.CharField(max_length=255, default='', editable=False)
def get_statuses(self, api):
# Fall back to slug for historical reasons
statuses = api.GetUserTimeline(
screen_name=self.name or self.slug, include_rts=True
)
return statuses
def fetch(self, force=False):
statuses = super(Feed, self).fetch(force=force)
if statuses:
# This is a convenient place to set the feed image url
status = statuses[0]
changed = False
if status.user.profile_image_url != self.profile_image_url:
self.profile_image_url = status.user.profile_image_url
changed = True
if status.user.name != self.title:
self.title = status.user.name
changed = True
if changed:
self.save()
return statuses
class Search(ModelBase, StatusMixin):
"""A search represents a twitter keyword search"""
criteria = models.CharField(
max_length=255,
unique=True,
help_text="Search string or a hashtag"
)
class Meta:
verbose_name_plural = _("Searches")
def get_statuses(self, api):
return api.GetSearch(self.criteria)
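# Illustrative usage (assumes JMBO_TWITTER credentials are configured in
# settings; the account name is made up):
#
#   feed = Feed.objects.create(title='John Smith', name='johnsmith')
#   feed.fetch()  # queries Twitter and primes the cache
#   for tweet in feed.tweets:
#       print tweet.text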
|
bsd-3-clause
| 148,431,581,819,713,100
| 30.349398
| 80
| 0.580515
| false
| 4.071987
| false
| false
| false
|
ronas/PythonGNF
|
Fabulao/PedidosCapa.py
|
1
|
3001
|
# -*- coding: latin -*-
import sys
#from PyQt5 import QtGui, QtCore, QtWidgets #, QTableWidget, QTableWidgetItem
from PyQt5.QtWidgets import QApplication, QWidget, QTableWidget, QTableWidgetItem, QLineEdit, QLabel
from PyQt5.QtCore import QSize, Qt
import pymysql
config = {
'host': 'localhost',
'port': 3306,
'database': 'LojaDB',
'user': 'root',
'password' : 'fbl1978'
}
class ClasseAPP(QWidget):
def __init__(self):
super(ClasseAPP, self).__init__()
self.initUI()
def initUI(self):
self.setWindowTitle('Pedidos')
self.resize(850, 400)
self.move(300, 200)
self.tabela = QTableWidget(3,5,self)
self.tabela.setGeometry(20,20,760,300)
self.tabela.setHorizontalHeaderLabels(('Numero Pedido','Data','Codigo Cliente','Telefone','Cond Pagamento'))
self.dbBuscarPedidos()
self.lblNumeroPedido = QLabel('Numero Pedido',self)
self.lblNumeroPedido.setGeometry(20,330,130,25)
self.lblData = QLabel('Data',self)
self.lblData.setGeometry(100, 360, 50, 25)
#self.lblCodigoCliente = QLabel('Codigo Cliente',self)
#self.lblCodigoCliente.setGeometry()
#self.lblTelefone = QLabel('Telefone',self)
#self.lblTelefone.setGeometry()
#self.lblCondPagamento = QLabel('Cond Pagamento',self)
#self.lblCondPagamento.setGeometry()
self.txtNumeroPedido = QLineEdit(self)
self.txtNumeroPedido.setGeometry(130,330,130,25)
self.txtData = QLineEdit(self)
self.txtData.setGeometry(130,360,50,25)
#self.txtCodigoCliente = QLineEdit(self)
#self.txtCodigoCliente.setGeometry()
#self.txtTelefone = QLineEdit(self)
#self.txtTelefone.setGeometry()
#self.txtCondPagamento = QLineEdit(self)
#self.txtCondPagamento.setGeometry()
self.tabela.resizeColumnsToContents()
self.show()
def dbBuscarPedidos(self):
db = pymysql.connect(**config)
cursor = db.cursor()
comando = ('select * from LojaDB.Pedidos ')
cursor.execute(comando)
self.tabela.setRowCount(0)
registros = cursor.fetchall()
for registro in registros:
numerolinhas = self.tabela.rowCount()
self.tabela.insertRow(numerolinhas)
self.tabela.setItem(numerolinhas, 0, QTableWidgetItem( str(registro[0]) ))
self.tabela.setItem(numerolinhas, 1, QTableWidgetItem( str(registro[1]) ))
self.tabela.setItem(numerolinhas, 2, QTableWidgetItem( registro[2] ))
self.tabela.setItem(numerolinhas, 3, QTableWidgetItem( str(registro[3]) ))
self.tabela.setItem(numerolinhas, 4, QTableWidgetItem( registro[4] ))
cursor.close()
db.close()
def main():
app = QApplication(sys.argv)
MeuApp = ClasseAPP()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
gpl-3.0
| -1,464,602,394,802,016,300
| 29.632653
| 116
| 0.625125
| false
| 3.286966
| false
| false
| false
|
caronc/newsreap
|
newsreap/Logging.py
|
1
|
6395
|
# -*- coding: utf-8 -*-
#
# Common Logging Parameters and Defaults
#
# Copyright (C) 2015-2017 Chris Caron <lead2gold@gmail.com>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# The first part of the file defines all of the namespacing
# used by this application
import sys
import logging
import logging.handlers
# We intentionally import this module so it preconfigures its logging.
# From there we can choose to manipulate it later without worrying about
# its configuration over-riding ours; this creates a lint warning
# that we're importing a module we're not using; but this is intended.
# do not comment out or remove this entry
import sqlalchemy
# The default logger identifier used for general logging
NEWSREAP_LOGGER = 'newsreap'
# The default logger which displays backend engine and
# NNTP Server Interaction
NEWSREAP_ENGINE = '%s.engine' % NEWSREAP_LOGGER
# Codec Manipulation such as yEnc, uuencoded, etc
NEWSREAP_CODEC = '%s.codec' % NEWSREAP_LOGGER
# Users should utilize this for their hook logging
NEWSREAP_HOOKS = '%s.hooks' % NEWSREAP_LOGGER
# Command Line Interface Logger
NEWSREAP_CLI = '%s.cli' % NEWSREAP_LOGGER
# For a common reference point, we include the static logging
# Resource at the time for this information was:
# - http://docs.sqlalchemy.org/en/rel_1_0/core/engines.html#dbengine-logging
#
# namespaces used by SQLAlchemy
SQLALCHEMY_LOGGER = 'sqlalchemy'
# Defines the logger for the SQLAlchemy Engine
SQLALCHEMY_ENGINE = '%s.engine' % SQLALCHEMY_LOGGER
# Controls SQLAlchemy's connection pool logging.
SQLALCHEMY_POOL = '%s.pool' % SQLALCHEMY_LOGGER
# Controls SQLAlchemy's various Object Relational Mapping (ORM) logging.
SQLALCHEMY_ORM = '%s.orm' % SQLALCHEMY_LOGGER
# The number of bytes reached before automatically rotating the log file
# if this option was specified
# 5000000 bytes == 5 Megabytes
LOG_ROTATE_FILESIZE_BYTES = 5000000
def add_handler(logger, sendto=True, backupCount=5):
"""
Add handler to the identified logger
sendto == None then logging is disabled
sendto == True then logging is put to stdout
sendto == False then logging is put to stderr
sendto == <string> then logging is routed to the filename specified
if sendto is a <string>, then backupCount defines the number of rotated
logs to keep around. Set this to 0 or None if you don't wish the python
logger to rotate the files at all. By default logs are rotated
once they reach 5MB
"""
if sendto is True:
# redirect to stdout
handler = logging.StreamHandler(sys.stdout)
elif sendto is False:
# redirect to stderr
handler = logging.StreamHandler(sys.stderr)
elif sendto is None:
# redirect to null
try:
handler = logging.NullHandler()
except AttributeError:
# Python <= v2.6
class NullHandler(logging.Handler):
def emit(self, record):
pass
handler = NullHandler()
# Set data to NOTSET just to eliminate the
# extra checks done internally
if logger.level != logging.NOTSET:
logger.setLevel(logging.NOTSET)
elif isinstance(sendto, basestring):
if backupCount is None:
handler = logging.FileHandler(filename=sendto)
elif isinstance(backupCount, int):
handler = logging.handlers.RotatingFileHandler(
filename=sendto,
maxBytes=LOG_ROTATE_FILESIZE_BYTES,
backupCount=backupCount,
)
else:
# We failed to add a handler
return False
# Setup Log Format
handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s %(name)s %(message)s'))
# Add Handler
logger.addHandler(handler)
return True
def init(verbose=2, sendto=True, backupCount=5):
"""
Sets up some simple default handling to make it
easier for those wrapping this library.
You do not need to call this function if you
don't want to; ideally one might want to set up
things their own way.
"""
# Add our handlers at the parent level
add_handler(
logging.getLogger(SQLALCHEMY_LOGGER),
sendto=sendto,
backupCount=backupCount,
)
add_handler(
logging.getLogger(NEWSREAP_LOGGER),
sendto=sendto,
backupCount=backupCount,
)
if verbose:
set_verbosity(verbose=verbose)
def set_verbosity(verbose):
"""
A simple function one can use to set the verbosity of
the app.
"""
# Default
logging.getLogger(SQLALCHEMY_LOGGER).setLevel(logging.ERROR)
logging.getLogger(SQLALCHEMY_ENGINE).setLevel(logging.ERROR)
logging.getLogger(NEWSREAP_LOGGER).setLevel(logging.ERROR)
logging.getLogger(NEWSREAP_CLI).setLevel(logging.ERROR)
logging.getLogger(NEWSREAP_CODEC).setLevel(logging.ERROR)
logging.getLogger(NEWSREAP_HOOKS).setLevel(logging.ERROR)
logging.getLogger(NEWSREAP_ENGINE).setLevel(logging.ERROR)
# Handle Verbosity
if verbose > 0:
logging.getLogger(NEWSREAP_CLI).setLevel(logging.INFO)
logging.getLogger(NEWSREAP_HOOKS).setLevel(logging.INFO)
logging.getLogger(NEWSREAP_ENGINE).setLevel(logging.INFO)
if verbose > 1:
logging.getLogger(NEWSREAP_CLI).setLevel(logging.DEBUG)
logging.getLogger(NEWSREAP_HOOKS).setLevel(logging.DEBUG)
logging.getLogger(NEWSREAP_ENGINE).setLevel(logging.DEBUG)
if verbose > 2:
logging.getLogger(SQLALCHEMY_ENGINE).setLevel(logging.INFO)
logging.getLogger(NEWSREAP_CODEC).setLevel(logging.INFO)
if verbose > 3:
logging.getLogger(NEWSREAP_CODEC).setLevel(logging.DEBUG)
if verbose > 4:
logging.getLogger(SQLALCHEMY_ENGINE).setLevel(logging.DEBUG)
# If the root logger level is unset, default to quiet logging (errors only).
rootlogger = logging.getLogger(NEWSREAP_LOGGER)
if rootlogger.level == logging.NOTSET:
set_verbosity(-1)
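# Illustrative setup (the log path and verbosity are assumptions):
#
#   import logging
#   from newsreap import Logging
#   Logging.init(verbose=3, sendto='/tmp/newsreap.log', backupCount=5)
#   logging.getLogger(Logging.NEWSREAP_ENGINE).info('engine started')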
|
gpl-3.0
| 3,607,045,367,080,225,300
| 31.794872
| 76
| 0.696638
| false
| 3.89227
| false
| false
| false
|
Mirantis/octane
|
octane/commands/sync_images.py
|
1
|
2791
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from octane.helpers.sync_glance_images import sync_glance_images
from octane.util import db
from octane.util import env as env_util
from octane.util import ssh
def prepare(orig_id, seed_id):
orig_env = environment_obj.Environment(orig_id)
seed_env = environment_obj.Environment(seed_id)
controller = env_util.get_one_controller(seed_env)
with tempfile.NamedTemporaryFile() as temp:
db.mysqldump_from_env(orig_env, ['keystone'], temp.name)
db.mysqldump_restore_to_env(seed_env, temp.name)
ssh.call(['keystone-manage', 'db_sync'],
node=controller, parse_levels=True)
for controller in env_util.get_controllers(seed_env):
ssh.call(['service', 'memcached', 'restart'], node=controller)
class SyncImagesCommand(cmd.Command):
"""Sync glance images between ORIG and SEED environments"""
def get_parser(self, prog_name):
parser = super(SyncImagesCommand, self).get_parser(prog_name)
parser.add_argument(
'orig_id', type=int, metavar='ORIG_ID',
help="ID of original environment")
parser.add_argument(
'seed_id', type=int, metavar='SEED_ID',
help="ID of seed environment")
parser.add_argument(
'swift_ep', type=str,
help="Endpoint's name where swift-proxy service is listening on")
return parser
def take_action(self, parsed_args):
sync_glance_images(parsed_args.orig_id, parsed_args.seed_id,
parsed_args.swift_ep)
class SyncImagesPrepareCommand(cmd.Command):
"""Sync glance images between ORIG and SEED environments"""
def get_parser(self, prog_name):
parser = super(SyncImagesPrepareCommand, self).get_parser(prog_name)
parser.add_argument(
'orig_id', type=int, metavar='ORIG_ID',
help="ID of original environment")
parser.add_argument(
'seed_id', type=int, metavar='SEED_ID',
help="ID of seed environment")
return parser
def take_action(self, parsed_args):
prepare(parsed_args.orig_id, parsed_args.seed_id)
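# Illustrative CLI usage (command names are assumptions based on the class
# names; check the project's cliff entry points):
#
#   octane sync-images-prepare <ORIG_ID> <SEED_ID>
#   octane sync-images <ORIG_ID> <SEED_ID> <SWIFT_ENDPOINT_NAME>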
|
apache-2.0
| -4,300,452,264,042,446,300
| 36.716216
| 77
| 0.677893
| false
| 3.860304
| false
| false
| false
|
Wolnosciowiec/file-repository
|
client/bahub/bahubapp/handler/__init__.py
|
1
|
4733
|
from ..entity.definition import BackupDefinition
from ..service.client import FileRepositoryClient
from ..service.pipefactory import PipeFactory
from ..exceptions import ReadWriteException
from ..result import CommandExecutionResult
from logging import Logger
import string
import random
import subprocess
from shutil import copyfileobj
class BackupHandler:
""" Manages the process of backup and restore, interacts with different sources of backup data using adapters """
_client = None # type: FileRepositoryClient
_pipe_factory = None # type: PipeFactory
_logger = None # type: Logger
_definition = None
def __init__(self,
_client: FileRepositoryClient,
_pipe_factory: PipeFactory,
_logger: Logger,
_definition: BackupDefinition):
self._client = _client
self._pipe_factory = _pipe_factory
self._logger = _logger
self._definition = _definition
def perform_backup(self):
self._validate()
self._validate_running_command()
response = self._read()
if response.return_code != 0 and response.return_code is not None:
raise ReadWriteException('Backup source read error, use --debug and retry to investigate')
upload_response = self._client.send(response.stdout, self._get_definition())
response.process.wait(15)
response.stdout.close()
return upload_response
def perform_restore(self, version: str):
response = self._write(
self._read_from_storage(version)
)
response.process.wait()
self._logger.info('Waiting for process to finish')
if response.return_code is not None and response.return_code > 0:
raise ReadWriteException('Cannot write files to disk while restoring from backup. Errors: '
+ str(response.stderr.read().decode('utf-8')))
self._logger.info('No errors found, sending success information')
return '{"status": "OK"}'
def close(self):
self._logger.info('Finishing the process')
self._close()
def _get_definition(self) -> BackupDefinition:
return self._definition
def _execute_command(self, command: str, stdin=None) -> CommandExecutionResult:
"""
Executes a command on local machine, returning stdout as a stream, and streaming in the stdin (optionally)
"""
self._logger.debug('shell(' + command + ')')
process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE if stdin else None,
executable='/bin/bash',
shell=True)
if stdin:
self._logger.info('Copying stdin to process')
try:
copyfileobj(stdin, process.stdin)
except BrokenPipeError:
raise ReadWriteException(
'Cannot write to process, broken pipe occurred, probably a tar process died. '
+ str(process.stdin.read()) + str(process.stderr.read())
)
process.stdin.close()
return CommandExecutionResult(process.stdout, process.stderr, process.returncode, process)
def _validate_running_command(self):
""" Validate if the command really exports the data, does not end up with an error """
response = self._read()
response.stdout.read(1024)
response.process.kill()
response.process.wait(15)
if response.process.returncode > 0:
raise ReadWriteException(
                'The process exited with an incorrect code; try to verify the command with the --debug switch'
)
def _validate(self):
raise Exception('_validate() not implemented for handler')
def _read(self) -> CommandExecutionResult:
""" TAR output or file stream buffered from ANY source for example """
raise Exception('_read() not implemented for handler')
def _write(self, stream) -> CommandExecutionResult:
""" A file stream or tar output be written into the storage. May be OpenSSL encoded, depends on definition """
raise Exception('_write() not implemented for handler')
def _read_from_storage(self, version: str):
return self._client.fetch(version, self._get_definition())
def _close(self):
pass
@staticmethod
def generate_id(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
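# --- Illustrative sketch, not part of the original file: a minimal concrete
# handler showing how the adapter contract above is meant to be filled in.
# The class name and the shell commands are assumptions for demonstration;
# real handlers would take their commands from the BackupDefinition entity.
class CommandOutputBackupExample(BackupHandler):
    """ Hypothetical handler that streams the output of a shell command """

    def _validate(self):
        # nothing to check in this toy example
        pass

    def _read(self) -> CommandExecutionResult:
        # 'tar -czf - /var/www' is a placeholder source command
        return self._execute_command('tar -czf - /var/www')

    def _write(self, stream) -> CommandExecutionResult:
        # placeholder restore target; a real handler would unpack properly
        return self._execute_command('tar -xzf - -C /', stdin=stream)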
|
lgpl-3.0
| -7,351,242,393,543,664,000
| 35.129771
| 118
| 0.616734
| false
| 4.761569
| false
| false
| false
|
aerokappa/SantaClaus
|
handCodedOptimum_v4.py
|
1
|
2216
|
import numpy as np
import pandas as pd
from processInput import processInput
def handCodedOptimum_v4 ( ):
fileName = 'gifts.csv'
giftList, giftListSummary = processInput( fileName )
packedBags = []
for i in np.arange(1000):
print i
currentBag = []
if (i< 333):
itemCount = np.array([0 ,3 ,0 ,0 ,0 ,0 ,0 ,3 ,0])
elif ((i>=333) & (i<458)):
itemCount = np.array([8, 0, 0, 0, 0, 0, 0, 0, 0])
elif ((i>=458) & (i<583)):
itemCount = np.array([0, 0, 0, 0, 0, 0, 8, 0, 0])
elif ((i>=583) & (i<916)):
itemCount = np.array([0, 0, 0, 3, 0, 2, 0, 0, 0])
elif ((i>=916) & (i<924)):
itemCount = np.array([ 0, 0, 0, 0, 0, 0, 0, 0, 25])
elif ((i>=924) & (i<928)):
itemCount = np.array([ 0, 23, 0, 0, 0, 0, 0, 0, 0])
elif ((i>=928) & (i<938)):
itemCount = np.array([ 0, 0, 0, 0, 0, 19, 0, 0, 0])
elif ((i>=938) & (i<939)):
itemCount = np.array([ 0, 0, 0, 0, 0, 11, 0, 1, 0])
elif ((i>=939) & (i<940)):
itemCount = np.array([0, 9, 0, 1, 0, 0, 0, 0, 0])
else:
itemCount = np.array([0, 0, 1, 0, 0, 5, 0, 0, 0])
        # use a new loop variable to avoid shadowing the bag index above
        for k in np.arange(len(itemCount)):
            if (itemCount[k] <= giftListSummary['nGiftsNotPacked'][k]):
                for j in np.arange(itemCount[k]):
                    giftName = giftListSummary['GiftType'][k]
                    currGiftID = giftListSummary['nGiftsPacked'][k]
                    currentBag.append(giftName+'_'+str(currGiftID))
                    giftListSummary['nGiftsPacked'][k] += 1
                    giftListSummary['nGiftsNotPacked'][k] -= 1
packedBags.append(currentBag)
    # Write to file 'submission_5.csv'
subFile = open('submission_5.csv','w')
subFile.write('Gifts\n')
for currentBag in packedBags:
subFile.write(currentBag[0])
for currentItem in currentBag[1:]:
subFile.write(' ')
subFile.write(currentItem)
subFile.write('\n')
subFile.close()
return packedBags
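# --- Illustrative usage sketch, not part of the original script; assumes the
# 'gifts.csv' input file described above is present next to the script:
#
#     packedBags = handCodedOptimum_v4()
#     print len(packedBags)   # -> 1000 bags, also written to submission_5.csv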
|
mit
| -6,132,476,382,293,829,000
| 35.344262
| 73
| 0.476083
| false
| 3.014966
| false
| false
| false
|
Jbonnett/Mutagen-flo
|
mutagen/ogg.py
|
1
|
17770
|
# Copyright 2006 Joe Wreschnig
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# $Id: ogg.py 3975 2007-01-13 21:51:17Z piman $
"""Read and write Ogg bitstreams and pages.
This module reads and writes a subset of the Ogg bitstream format
version 0. It does *not* read or write Ogg Vorbis files! For that,
you should use mutagen.oggvorbis.
This implementation is based on the RFC 3533 standard found at
http://www.xiph.org/ogg/doc/rfc3533.txt.
"""
import struct
import sys
import zlib
from cStringIO import StringIO
from mutagen import FileType
from mutagen._util import cdata, insert_bytes, delete_bytes, WrappedFileobj
class error(IOError):
"""Ogg stream parsing errors."""
pass
class OggPage(object):
"""A single Ogg page (not necessarily a single encoded packet).
A page is a header of 26 bytes, followed by the length of the
data, followed by the data.
    The constructor is given a file-like object pointing to the start
of an Ogg page. After the constructor is finished it is pointing
to the start of the next page.
Attributes:
version -- stream structure version (currently always 0)
position -- absolute stream position (default -1)
serial -- logical stream serial number (default 0)
sequence -- page sequence number within logical stream (default 0)
offset -- offset this page was read from (default None)
complete -- if the last packet on this page is complete (default True)
packets -- list of raw packet data (default [])
Note that if 'complete' is false, the next page's 'continued'
property must be true (so set both when constructing pages).
If a file-like object is supplied to the constructor, the above
attributes will be filled in based on it.
"""
version = 0
__type_flags = 0
position = 0L
serial = 0
sequence = 0
offset = None
complete = True
def __init__(self, fileobj=None):
self.packets = []
if fileobj is None:
return
self.offset = fileobj.tell()
header = fileobj.read(27)
if len(header) == 0:
raise EOFError
try:
(oggs, self.version, self.__type_flags, self.position,
self.serial, self.sequence, crc, segments) = struct.unpack(
"<4sBBqIIiB", header)
except struct.error:
raise error("unable to read full header; got %r" % header)
if oggs != "OggS":
raise error("read %r, expected %r, at 0x%x" % (
oggs, "OggS", fileobj.tell() - 27))
if self.version != 0:
raise error("version %r unsupported" % self.version)
total = 0
lacings = []
lacing_bytes = fileobj.read(segments)
if len(lacing_bytes) != segments:
raise error("unable to read %r lacing bytes" % segments)
for c in map(ord, lacing_bytes):
total += c
if c < 255:
lacings.append(total)
total = 0
if total:
lacings.append(total)
self.complete = False
self.packets = map(fileobj.read, lacings)
if map(len, self.packets) != lacings:
raise error("unable to read full data")
def __eq__(self, other):
"""Two Ogg pages are the same if they write the same data."""
try:
return (self.write() == other.write())
except AttributeError:
return False
__hash__ = object.__hash__
def __repr__(self):
attrs = ['version', 'position', 'serial', 'sequence', 'offset',
'complete', 'continued', 'first', 'last']
values = ["%s=%r" % (attr, getattr(self, attr)) for attr in attrs]
return "<%s %s, %d bytes in %d packets>" % (
type(self).__name__, " ".join(values), sum(map(len, self.packets)),
len(self.packets))
def write(self):
"""Return a string encoding of the page header and data.
A ValueError is raised if the data is too big to fit in a
single page.
"""
data = [
struct.pack("<4sBBqIIi", "OggS", self.version, self.__type_flags,
self.position, self.serial, self.sequence, 0)
]
lacing_data = []
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
lacing_data.append("\xff" * quot + chr(rem))
lacing_data = "".join(lacing_data)
if not self.complete and lacing_data.endswith("\x00"):
lacing_data = lacing_data[:-1]
data.append(chr(len(lacing_data)))
data.append(lacing_data)
data.extend(self.packets)
data = "".join(data)
# Python's CRC is swapped relative to Ogg's needs.
crc = ~zlib.crc32(data.translate(cdata.bitswap), -1)
# Although we're using to_int_be, this actually makes the CRC
# a proper le integer, since Python's CRC is byteswapped.
crc = cdata.to_int_be(crc).translate(cdata.bitswap)
data = data[:22] + crc + data[26:]
return data
def __size(self):
size = 27 # Initial header size
for datum in self.packets:
quot, rem = divmod(len(datum), 255)
size += quot + 1
if not self.complete and rem == 0:
# Packet contains a multiple of 255 bytes and is not
# terminated, so we don't have a \x00 at the end.
size -= 1
size += sum(map(len, self.packets))
return size
size = property(__size, doc="Total frame size.")
def __set_flag(self, bit, val):
mask = 1 << bit
if val: self.__type_flags |= mask
else: self.__type_flags &= ~mask
continued = property(
lambda self: cdata.test_bit(self.__type_flags, 0),
lambda self, v: self.__set_flag(0, v),
doc="The first packet is continued from the previous page.")
first = property(
lambda self: cdata.test_bit(self.__type_flags, 1),
lambda self, v: self.__set_flag(1, v),
doc="This is the first page of a logical bitstream.")
last = property(
lambda self: cdata.test_bit(self.__type_flags, 2),
lambda self, v: self.__set_flag(2, v),
doc="This is the last page of a logical bitstream.")
def renumber(klass, fileobj, serial, start):
"""Renumber pages belonging to a specified logical stream.
fileobj must be opened with mode r+b or w+b.
Starting at page number 'start', renumber all pages belonging
to logical stream 'serial'. Other pages will be ignored.
fileobj must point to the start of a valid Ogg page; any
        occurring after it and part of the specified logical stream
will be numbered. No adjustment will be made to the data in
the pages nor the granule position; only the page number, and
so also the CRC.
If an error occurs (e.g. non-Ogg data is found), fileobj will
        be left pointing to the place in the stream the error occurred,
but the invalid data will be left intact (since this function
does not change the total file size).
"""
number = start
while True:
try: page = OggPage(fileobj)
except EOFError:
break
else:
if page.serial != serial:
# Wrong stream, skip this page.
continue
# Changing the number can't change the page size,
# so seeking back based on the current size is safe.
fileobj.seek(-page.size, 1)
page.sequence = number
fileobj.write(page.write())
fileobj.seek(page.offset + page.size, 0)
number += 1
renumber = classmethod(renumber)
def to_packets(klass, pages, strict=False):
"""Construct a list of packet data from a list of Ogg pages.
If strict is true, the first page must start a new packet,
and the last page must end the last packet.
"""
serial = pages[0].serial
sequence = pages[0].sequence
packets = []
if strict:
if pages[0].continued:
raise ValueError("first packet is continued")
if not pages[-1].complete:
raise ValueError("last packet does not complete")
elif pages and pages[0].continued:
packets.append("")
for page in pages:
if serial != page.serial:
raise ValueError("invalid serial number in %r" % page)
elif sequence != page.sequence:
raise ValueError("bad sequence number in %r" % page)
else: sequence += 1
if page.continued: packets[-1] += page.packets[0]
else: packets.append(page.packets[0])
packets.extend(page.packets[1:])
return packets
to_packets = classmethod(to_packets)
def from_packets(klass, packets, sequence=0,
default_size=4096, wiggle_room=2048):
"""Construct a list of Ogg pages from a list of packet data.
The algorithm will generate pages of approximately
default_size in size (rounded down to the nearest multiple of
255). However, it will also allow pages to increase to
approximately default_size + wiggle_room if allowing the
wiggle room would finish a packet (only one packet will be
finished in this way per page; if the next packet would fit
into the wiggle room, it still starts on a new page).
This method reduces packet fragmentation when packet sizes are
slightly larger than the default page size, while still
ensuring most pages are of the average size.
Pages are numbered started at 'sequence'; other information is
uninitialized.
"""
chunk_size = (default_size // 255) * 255
pages = []
page = OggPage()
page.sequence = sequence
for packet in packets:
page.packets.append("")
while packet:
data, packet = packet[:chunk_size], packet[chunk_size:]
if page.size < default_size and len(page.packets) < 255:
page.packets[-1] += data
else:
# If we've put any packet data into this page yet,
# we need to mark it incomplete. However, we can
# also have just started this packet on an already
# full page, in which case, just start the new
# page with this packet.
if page.packets[-1]:
page.complete = False
if len(page.packets) == 1:
page.position = -1L
else:
page.packets.pop(-1)
pages.append(page)
page = OggPage()
page.continued = not pages[-1].complete
page.sequence = pages[-1].sequence + 1
page.packets.append(data)
if len(packet) < wiggle_room:
page.packets[-1] += packet
packet = ""
if page.packets:
pages.append(page)
return pages
from_packets = classmethod(from_packets)
def replace(klass, fileobj, old_pages, new_pages):
"""Replace old_pages with new_pages within fileobj.
old_pages must have come from reading fileobj originally.
new_pages are assumed to have the 'same' data as old_pages,
and so the serial and sequence numbers will be copied, as will
the flags for the first and last pages.
fileobj will be resized and pages renumbered as necessary. As
such, it must be opened r+b or w+b.
"""
# Number the new pages starting from the first old page.
first = old_pages[0].sequence
for page, seq in zip(new_pages, range(first, first + len(new_pages))):
page.sequence = seq
page.serial = old_pages[0].serial
new_pages[0].first = old_pages[0].first
new_pages[0].last = old_pages[0].last
new_pages[0].continued = old_pages[0].continued
new_pages[-1].first = old_pages[-1].first
new_pages[-1].last = old_pages[-1].last
new_pages[-1].complete = old_pages[-1].complete
if not new_pages[-1].complete and len(new_pages[-1].packets) == 1:
new_pages[-1].position = -1L
new_data = "".join(map(klass.write, new_pages))
# Make room in the file for the new data.
delta = len(new_data)
fileobj.seek(old_pages[0].offset, 0)
insert_bytes(fileobj, delta, old_pages[0].offset)
fileobj.seek(old_pages[0].offset, 0)
fileobj.write(new_data)
new_data_end = old_pages[0].offset + delta
# Go through the old pages and delete them. Since we shifted
# the data down the file, we need to adjust their offsets. We
# also need to go backwards, so we don't adjust the deltas of
# the other pages.
old_pages.reverse()
for old_page in old_pages:
adj_offset = old_page.offset + delta
delete_bytes(fileobj, old_page.size, adj_offset)
# Finally, if there's any discrepency in length, we need to
# renumber the pages for the logical stream.
if len(old_pages) != len(new_pages):
fileobj.seek(new_data_end, 0)
serial = new_pages[-1].serial
sequence = new_pages[-1].sequence + 1
klass.renumber(fileobj, serial, sequence)
replace = classmethod(replace)
def find_last(klass, fileobj, serial):
"""Find the last page of the stream 'serial'.
If the file is not multiplexed this function is fast. If it is,
        it must read the whole stream.
This finds the last page in the actual file object, or the last
page in the stream (with eos set), whichever comes first.
"""
# For non-muxed streams, look at the last page.
try: fileobj.seek(-256*256, 2)
except IOError:
# The file is less than 64k in length.
fileobj.seek(0)
data = fileobj.read()
try: index = data.rindex("OggS")
except ValueError:
raise error("unable to find final Ogg header")
stringobj = StringIO(data[index:])
best_page = None
try:
page = OggPage(stringobj)
except error:
pass
else:
if page.serial == serial:
if page.last: return page
else: best_page = page
else: best_page = None
# The stream is muxed, so use the slow way.
fileobj.seek(0)
try:
page = OggPage(fileobj)
while not page.last:
page = OggPage(fileobj)
while page.serial != serial:
page = OggPage(fileobj)
best_page = page
return page
except error:
return best_page
except EOFError:
return best_page
find_last = classmethod(find_last)
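# --- Illustrative sketch, not part of the original module: a packets <-> pages
# round trip using the two classmethods above. Pages built this way carry
# uninitialized serial/position values; a real writer fills those in.
#
#     packets = ["first packet", "x" * 5000, "last packet"]
#     pages = OggPage.from_packets(packets, sequence=0)
#     assert OggPage.to_packets(pages) == packets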
class OggFileType(FileType):
"""An generic Ogg file."""
_Info = None
_Tags = None
_Error = None
_mimes = ["application/ogg", "application/x-ogg"]
def load(self, filename):
"""Load file information from a filename."""
self.filename = filename
fileobj = WrappedFileobj(filename, "rb")
try:
try:
self.info = self._Info(fileobj)
self.tags = self._Tags(fileobj, self.info)
if self.info.length:
# The streaminfo gave us real length information,
# don't waste time scanning the Ogg.
return
last_page = OggPage.find_last(fileobj, self.info.serial)
samples = last_page.position
try:
denom = self.info.sample_rate
except AttributeError:
denom = self.info.fps
self.info.length = samples / float(denom)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
def delete(self, filename=None):
"""Remove tags from a file.
If no filename is given, the one most recently loaded is used.
"""
if filename is None:
filename = self.filename
self.tags.clear()
fileobj = WrappedFileobj(filename, "rb+")
try:
try: self.tags._inject(fileobj)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
def save(self, filename=None):
"""Save a tag to a file.
If no filename is given, the one most recently loaded is used.
"""
if filename is None:
filename = self.filename
fileobj = WrappedFileobj(filename, "rb+")
try:
try: self.tags._inject(fileobj)
except error, e:
raise self._Error, e, sys.exc_info()[2]
except EOFError:
raise self._Error, "no appropriate stream found"
finally:
fileobj.close()
|
gpl-2.0
| -2,453,146,239,836,660,000
| 34.54
| 79
| 0.570568
| false
| 4.24105
| false
| false
| false
|
arcticfoxnv/slackminion
|
slackminion/plugin/base.py
|
1
|
4847
|
from six import string_types
from builtins import object
import logging
import threading
from slackminion.slack import SlackChannel, SlackIM, SlackUser, SlackRoom
class BasePlugin(object):
def __init__(self, bot, **kwargs):
self.log = logging.getLogger(type(self).__name__)
self._bot = bot
self._dont_save = False # By default, we want to save a plugin's state during save_state()
self._state_handler = False # State storage backends should set this to true
self._timer_callbacks = {}
self.config = {}
if 'config' in kwargs:
self.config = kwargs['config']
def on_load(self):
"""
Executes when a plugin is loaded.
Override this if your plugin needs to do initialization when loading.
Do not use this to restore runtime changes to variables -- they will be overwritten later on by
PluginManager.load_state()
"""
return True
def on_unload(self):
"""
Executes when a plugin is unloaded.
Override this if your plugin needs to do cleanup when unloading.
"""
return True
def on_connect(self):
"""
Executes immediately after connecting to slack.
Will not fire on reconnects.
"""
return True
def send_message(self, channel, text, thread=None, reply_broadcast=False):
"""
Used to send a message to the specified channel.
* channel - can be a channel or user
* text - message to send
* thread - thread to reply in
* reply_broadcast - whether or not to also send the message to the channel
"""
self.log.debug('Sending message to channel {} of type {}'.format(channel, type(channel)))
if isinstance(channel, SlackIM) or isinstance(channel, SlackUser):
self._bot.send_im(channel, text)
elif isinstance(channel, SlackRoom):
self._bot.send_message(channel, text, thread, reply_broadcast)
elif isinstance(channel, string_types):
if channel[0] == '@':
self._bot.send_im(channel[1:], text)
elif channel[0] == '#':
self._bot.send_message(channel[1:], text, thread, reply_broadcast)
else:
self._bot.send_message(channel, text, thread, reply_broadcast)
else:
self._bot.send_message(channel, text, thread, reply_broadcast)
def start_timer(self, duration, func, *args):
"""
Schedules a function to be called after some period of time.
* duration - time in seconds to wait before firing
* func - function to be called
* args - arguments to pass to the function
"""
self.log.info("Scheduling call to %s in %ds: %s", func.__name__, duration, args)
if self._bot.runnable:
t = threading.Timer(duration, self._timer_callback, (func, args))
self._timer_callbacks[func] = t
self._bot.timers.append(t)
t.start()
self.log.info("Scheduled call to %s in %ds", func.__name__, duration)
else:
self.log.warning("Not scheduling call to %s in %ds because we're shutting down.", func.__name__, duration)
def stop_timer(self, func):
"""
Stops a timer if it hasn't fired yet
* func - the function passed in start_timer
"""
self.log.debug('Stopping timer {}'.format(func.__name__))
if func in self._timer_callbacks:
t = self._timer_callbacks[func]
self._bot.timers.remove(t)
t.cancel()
del self._timer_callbacks[func]
def _timer_callback(self, func, args):
self.log.debug('Executing timer function {}'.format(func.__name__))
try:
func(*args)
except Exception:
self.log.exception("Caught exception executing timer function: {}".format(func.__name__))
def get_user(self, username):
"""
Utility function to query slack for a particular user
:param username: The username of the user to lookup
:return: SlackUser object or None
"""
if hasattr(self._bot, 'user_manager'):
user = self._bot.user_manager.get_by_username(username)
if user:
return user
user = SlackUser.get_user(self._bot.sc, username)
self._bot.user_manager.set(user)
return user
return SlackUser.get_user(self._bot.sc, username)
def get_channel(self, channel):
"""
Utility function to query slack for a particular channel
:param channel: The channel name or id of the channel to lookup
:return: SlackChannel object or None
"""
return SlackChannel.get_channel(self._bot.sc, channel)
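# --- Illustrative sketch, not part of the original file: a minimal plugin
# built only on the BasePlugin helpers above. The channel name and the
# 60-second interval are arbitrary assumptions for demonstration.
class AnnouncerPluginExample(BasePlugin):
    def on_load(self):
        # timers fire once, so _announce() reschedules itself each time
        self.start_timer(60, self._announce, '#general')
        return super(AnnouncerPluginExample, self).on_load()

    def _announce(self, channel):
        self.send_message(channel, 'Still alive!')
        self.start_timer(60, self._announce, channel)

    def on_unload(self):
        self.stop_timer(self._announce)
        return super(AnnouncerPluginExample, self).on_unload()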
|
mit
| -3,488,926,449,185,535,000
| 36
| 118
| 0.595007
| false
| 4.27425
| false
| false
| false
|
linxdcn/iS3
|
IS3Py/is3.py
|
2
|
7512
|
# Copyright (C) 2015 iS3 Software Foundation
# Author: Xiaojun Li
# Contact: xiaojunli@tongji.edu.cn
import sys
import clr
import System
# Load System.Windows.Media in PresentationCore.dll
sys.path.append('C:\\Program Files (x86)\\Reference Assemblies\\Microsoft\\Framework\\.NETFramework\\v4.5')
prcore = clr.LoadAssemblyFromFile('PresentationCore.dll')
clr.AddReference(prcore)
# Import classes in System
from System import Func,Action
from System.Windows.Media import Colors
from System.Collections.ObjectModel import ObservableCollection
from System.Threading.Tasks import Task
# Load IS3 namespaces
iS3Core = clr.LoadAssemblyFromFile('IS3.Core.dll')
clr.AddReference(iS3Core)
# Import classes in IS3
from IS3.Core import (Globals, Runtime, ErrorReport, ErrorReportTarget,
DGObject, DGObjects,
ProjectDefinition, Project,
EngineeringMap, EngineeringMapType, DrawShapeType,
IView, LayerDef, Domain, DomainType, ToolTreeItem)
from IS3.Core.Geometry import *
from IS3.Core.Graphics import *
def output(text):
print(text)
# Redirect ErrorReport to python console
ErrorReport.target = ErrorReportTarget.DelegateConsole
ErrorReport.consoleDelegate = output
# In Windows, UI thread vars and functions are restricted to other threads.
# So, be cautious with python calls to functions in the UI thread.
# Classes in the main UI thread include: mainframe, view, layer, ...
# Therefore, calls to functions in mainframe, view, layer, etc. are restricted.
mainframe = Globals.mainframe # Global var: mainframe
prj = mainframe.prj # Global var: prj
dispatcher = mainframe.Dispatcher # Global var: dispatcher -> UI thread manager
graphicsEngine = Runtime.graphicEngine # Global var: graphics Engine
geometryEngine = Runtime.geometryEngine # Global var: geometry Engine
class MainframeWrapper():
"Define thread safe calls to mainframe methods"
@staticmethod
def addView(emap, canClose = True):
"A thread safe call to -> mainframe.addView(emap, canclose)"
if (Globals.isThreadUnsafe()):
func = Func[EngineeringMap, bool, Task[IView]](mainframe.addView)
view = dispatcher.Invoke(func, emap, canClose)
else:
view = mainframe.addView(emap, canClose)
viewWrapper = ViewWrapper(view.Result)
return viewWrapper
@staticmethod
def loadDomainPanels():
"A thread safe call to -> mainframe.loadDomainPanels()"
if (Globals.isThreadUnsafe()):
dispatcher.Invoke(mainframe.loadDomainPanels)
else:
mainframe.loadDomainPanels()
class ViewWrapper():
"Define thread safe calls to IS3View methods"
def __init__(self, view):
self.view = view
def addLayer(self, layer):
"A thread safe call to -> IS3View.addLayer"
if (Globals.isThreadUnsafe()):
func = Action[IGraphicsLayer](self.view.addLayer)
dispatcher.Invoke(func, layer)
else:
self.view.addLayer(layer)
def addLocalTiledLayer(self, file, id):
"A thread safe call to -> IS3View.addLocalTiledLayer"
if (Globals.isThreadUnsafe()):
func = Action[str, str](self.view.addLocalTiledLayer)
dispatcher.Invoke(func, file, id)
else:
self.view.addLocalTiledLayer(file, id)
def addGdbLayer(self, layerDef, gdbFile, start = 0, maxFeatures = 0):
"A thread safe call to -> IS3View.addGdbLayer"
if (Globals.isThreadUnsafe()):
func = Func[LayerDef, str, int, int, Task[IGraphicsLayer]](self.view.addGdbLayer)
layer = dispatcher.Invoke(func, layerDef, gdbFile, start, maxFeatures)
else:
layer = self.view.addGdbLayer(layerDef, gdbFile, start, maxFeatures)
layerWrapper = GraphicsLayerWrapper(layer.Result)
return layerWrapper
def addShpLayer(self, layerDef, shpFile, start = 0, maxFeatures = 0):
"A thread safe call to -> IS3View.addShpLayer"
if (Globals.isThreadUnsafe()):
func = Func[LayerDef, str, int, int, Task[IGraphicsLayer]](self.view.addShpLayer)
layer = dispatcher.Invoke(func, layerDef, shpFile, start, maxFeatures)
else:
            layer = self.view.addShpLayer(layerDef, shpFile, start, maxFeatures)
layerWrapper = GraphicsLayerWrapper(layer.Result)
return layerWrapper
def selectByRect(self):
"A thread safe call to -> IS3View.selectByRect"
if (Globals.isThreadUnsafe()):
dispatcher.Invoke(self.view.selectByRect)
else:
self.view.selectByRect()
class GraphicsLayerWrapper():
"Define thread safe calls to IS3GraphicsLayer methods"
def __init__(self, glayer):
self.layer = glayer
def setRenderer(self, renderer):
"A thread safe call to -> IS3GraphicsLayer.setRenderer"
if (Globals.isThreadUnsafe()):
func = Action[IRenderer](self.layer.setRenderer)
dispatcher.Invoke(func, renderer)
else:
self.layer.setRenderer(renderer)
def addGraphic(self, graphic):
"A thread safe call to -> IS3GraphicsLayer.addGraphic"
if (Globals.isThreadUnsafe()):
func = Action[IGraphic](self.layer.addGraphic)
dispatcher.Invoke(func, graphic)
else:
self.layer.addGraphic(graphic)
def newGraphicsLayer(id, displayName):
layer = graphicsEngine.newGraphicsLayer(id, displayName)
layerWrapper = GraphicsLayerWrapper(layer)
return layerWrapper
def addView3d(id, file):
map3d = EngineeringMap()
map3d.MapID = id
map3d.MapType = EngineeringMapType.Map3D
map3d.LocalMapFileName = file
view3d = MainframeWrapper.addView(map3d, True)
return view3d
def addGdbLayer(viewWrapper, layerDef, gdbFile = None, start = 0, maxFeatures = 0):
prj = Globals.project
layerWrapper = viewWrapper.addGdbLayer(layerDef, gdbFile, start, maxFeatures)
if (layerWrapper.layer == None):
print('addGdbFileELayer failed: ' + layerDef.Name)
return None
else:
print('addGdbFileELayer succeeded: ' + layerDef.Name)
objs = prj.findObjects(layerDef.Name)
if (objs == None):
print('Layer ' + layerDef.Name + ' has no corresponding objects in the project.')
else:
count = layerWrapper.layer.syncObjects(objs)
print('Sync with ' + str(count) + ' objects for layer ' + layerDef.Name)
return layerWrapper
def addGdbLayerLazy(view, name, type, gdbFile = None, start = 0, maxFeatures = 0):
layerDef = LayerDef()
layerDef.Name = name
layerDef.GeometryType = type
layerWrapper = addGdbLayer(view, layerDef, gdbFile, start, maxFeatures)
return layerWrapper
def addShpLayer(viewWrapper, layerDef, shpfile, start = 0, maxFeatures = 0):
prj = Globals.project
layerWrapper = viewWrapper.addShpLayer(layerDef, shpfile, start, maxFeatures)
if (layerWrapper.layer == None):
print('addShpFileELayer failed: ' + layerDef.Name)
return None
else:
print('addShpFileELayer succeeded: ' + layerDef.Name)
objs = prj.findObjects(layerDef.Name)
if (objs == None):
print('Layer ' + layerDef.Name + ' has no corresponding objects in the project.')
else:
count = layerWrapper.layer.syncObjects(objs)
print('Sync with ' + str(count) + ' objects for layer ' + layerDef.Name)
return layerWrapper
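# --- Illustrative usage sketch, not part of the original script. The file
# paths, map id and layer name are assumptions and must match the loaded
# project; GeometryType is expected to come from the IS3.Core.Geometry
# wildcard import above.
#
#     view3d = addView3d('Map3D', 'C:\\iS3\\demo\\scene.unity3d')
#     layer = addGdbLayerLazy(view3d, 'Boreholes', GeometryType.Point,
#                             gdbFile='C:\\iS3\\demo\\project.geodatabase')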
|
lgpl-3.0
| -7,984,074,763,647,838,000
| 37.523077
| 107
| 0.676384
| false
| 3.729891
| false
| false
| false
|
abendig/django-mailchimp
|
mailchimp/models.py
|
1
|
9678
|
from django.db import models
import json as simplejson
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from mailchimp.utils import get_connection
class QueueManager(models.Manager):
def queue(self, campaign_type, contents, list_id, template_id, subject,
from_email, from_name, to_email, folder_id=None, tracking_opens=True,
tracking_html_clicks=True, tracking_text_clicks=False, title=None,
authenticate=False, google_analytics=None, auto_footer=False,
auto_tweet=False, segment_options=False, segment_options_all=True,
segment_options_conditions=[], type_opts={}, obj=None, extra_info=[]):
"""
Queue a campaign
"""
kwargs = locals().copy()
kwargs['segment_options_conditions'] = simplejson.dumps(segment_options_conditions)
kwargs['type_opts'] = simplejson.dumps(type_opts)
kwargs['contents'] = simplejson.dumps(contents)
kwargs['extra_info'] = simplejson.dumps(extra_info)
for thing in ('template_id', 'list_id'):
thingy = kwargs[thing]
if hasattr(thingy, 'id'):
kwargs[thing] = thingy.id
del kwargs['self']
del kwargs['obj']
if obj:
kwargs['object_id'] = obj.pk
kwargs['content_type'] = ContentType.objects.get_for_model(obj)
return self.create(**kwargs)
def dequeue(self, limit=None):
if limit:
qs = self.filter(locked=False)[:limit]
else:
qs = self.filter(locked=False)
for obj in qs:
yield obj.send()
def get_or_404(self, *args, **kwargs):
return get_object_or_404(self.model, *args, **kwargs)
class Queue(models.Model):
"""
A FIFO queue for async sending of campaigns
"""
campaign_type = models.CharField(max_length=50)
contents = models.TextField()
list_id = models.CharField(max_length=50)
template_id = models.PositiveIntegerField()
subject = models.CharField(max_length=255)
from_email = models.EmailField()
from_name = models.CharField(max_length=255)
to_email = models.EmailField()
folder_id = models.CharField(max_length=50, null=True, blank=True)
tracking_opens = models.BooleanField(default=True)
tracking_html_clicks = models.BooleanField(default=True)
tracking_text_clicks = models.BooleanField(default=False)
title = models.CharField(max_length=255, null=True, blank=True)
authenticate = models.BooleanField(default=False)
google_analytics = models.CharField(max_length=100, blank=True, null=True)
auto_footer = models.BooleanField(default=False)
generate_text = models.BooleanField(default=False)
auto_tweet = models.BooleanField(default=False)
segment_options = models.BooleanField(default=False)
segment_options_all = models.BooleanField(default=False)
segment_options_conditions = models.TextField()
type_opts = models.TextField()
content_type = models.ForeignKey(ContentType, null=True, blank=True)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = generic.GenericForeignKey('content_type', 'object_id')
extra_info = models.TextField(null=True)
locked = models.BooleanField(default=False)
objects = QueueManager()
def send(self):
"""
send (schedule) this queued object
"""
# check lock
if self.locked:
return False
        # acquire lock
self.locked = True
self.save()
# get connection and send the mails
c = get_connection()
tpl = c.get_template_by_id(self.template_id)
content_data = dict([(str(k), v) for k,v in simplejson.loads(self.contents).items()])
built_template = tpl.build(**content_data)
tracking = {'opens': self.tracking_opens,
'html_clicks': self.tracking_html_clicks,
'text_clicks': self.tracking_text_clicks}
if self.google_analytics:
analytics = {'google': self.google_analytics}
else:
analytics = {}
segment_opts = {'match': 'all' if self.segment_options_all else 'any',
'conditions': simplejson.loads(self.segment_options_conditions)}
type_opts = simplejson.loads(self.type_opts)
title = self.title or self.subject
camp = c.create_campaign(self.campaign_type, c.get_list_by_id(self.list_id),
built_template, self.subject, self.from_email, self.from_name,
self.to_email, self.folder_id, tracking, title, self.authenticate,
analytics, self.auto_footer, self.generate_text, self.auto_tweet,
segment_opts, type_opts)
if camp.send_now_async():
self.delete()
kwargs = {}
if self.content_type and self.object_id:
kwargs['content_type'] = self.content_type
kwargs['object_id'] = self.object_id
if self.extra_info:
kwargs['extra_info'] = simplejson.loads(self.extra_info)
return Campaign.objects.create(camp.id, segment_opts, **kwargs)
# release lock if failed
self.locked = False
self.save()
return False
def get_dequeue_url(self):
return reverse('mailchimp_dequeue', kwargs={'id': self.id})
def get_cancel_url(self):
return reverse('mailchimp_cancel', kwargs={'id': self.id})
def get_list(self):
return get_connection().lists[self.list_id]
@property
def object(self):
"""
        The object might have vanished by now, so triple check that it's there!
"""
if self.object_id:
model = self.content_type.model_class()
try:
return model.objects.get(id=self.object_id)
except model.DoesNotExist:
return None
return None
def get_object_admin_url(self):
if not self.object:
return ''
name = 'admin:%s_%s_change' % (self.object._meta.app_label,
self.object._meta.module_name)
return reverse(name, args=(self.object.pk,))
def can_dequeue(self, user):
if user.is_superuser:
return True
if not user.is_staff:
return False
if callable(getattr(self.object, 'mailchimp_can_dequeue', None)):
return self.object.mailchimp_can_dequeue(user)
return user.has_perm('mailchimp.can_send') and user.has_perm('mailchimp.can_dequeue')
class CampaignManager(models.Manager):
def create(self, campaign_id, segment_opts, content_type=None, object_id=None,
extra_info=[]):
con = get_connection()
camp = con.get_campaign_by_id(campaign_id)
extra_info = simplejson.dumps(extra_info)
obj = self.model(content=camp.content, campaign_id=campaign_id,
name=camp.title, content_type=content_type, object_id=object_id,
extra_info=extra_info)
obj.save()
segment_opts = dict([(str(k), v) for k,v in segment_opts.items()])
for email in camp.list.filter_members(segment_opts):
Reciever.objects.create(campaign=obj, email=email)
return obj
def get_or_404(self, *args, **kwargs):
return get_object_or_404(self.model, *args, **kwargs)
class DeletedCampaign(object):
subject = u'<deleted from mailchimp>'
class Campaign(models.Model):
sent_date = models.DateTimeField(auto_now_add=True)
campaign_id = models.CharField(max_length=50)
content = models.TextField()
name = models.CharField(max_length=255)
content_type = models.ForeignKey(ContentType, null=True, blank=True)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = generic.GenericForeignKey('content_type', 'object_id')
extra_info = models.TextField(null=True)
objects = CampaignManager()
class Meta:
ordering = ['-sent_date']
permissions = [('can_view', 'Can view Mailchimp information'),
('can_send', 'Can send Mailchimp newsletters')]
verbose_name = _('Mailchimp Log')
verbose_name_plural = _('Mailchimp Logs')
def get_absolute_url(self):
return reverse('mailchimp_campaign_info', kwargs={'campaign_id': self.campaign_id})
def get_object_admin_url(self):
if not self.object:
return ''
name = 'admin:%s_%s_change' % (self.object._meta.app_label,
self.object._meta.module_name)
return reverse(name, args=(self.object.pk,))
def get_extra_info(self):
if self.extra_info:
return simplejson.loads(self.extra_info)
return []
@property
def object(self):
"""
        The object might have vanished by now, so triple check that it's there!
"""
if self.object_id:
model = self.content_type.model_class()
try:
return model.objects.get(id=self.object_id)
except model.DoesNotExist:
return None
return None
@property
def mc(self):
try:
if not hasattr(self, '_mc'):
self._mc = get_connection().get_campaign_by_id(self.campaign_id)
return self._mc
except:
return DeletedCampaign()
class Reciever(models.Model):
campaign = models.ForeignKey(Campaign, related_name='recievers')
email = models.EmailField()
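# --- Illustrative sketch, not part of the original file: queueing a campaign
# and draining the queue. The list/template ids and the content keys are
# hypothetical and depend on the MailChimp account and template in use.
#
#     Queue.objects.queue(
#         campaign_type='regular', contents={'html_main': '<h1>Hello</h1>'},
#         list_id='abc123', template_id=42, subject='Newsletter',
#         from_email='news@example.com', from_name='Example', to_email='FNAME')
#     for sent in Queue.objects.dequeue():
#         pass  # each item is a Campaign instance, or False if sending failed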
|
bsd-3-clause
| -397,447,282,722,257,540
| 37.712
| 93
| 0.626886
| false
| 3.8712
| false
| false
| false
|
bashu/fluentcms-filer
|
fluentcms_filer/file/south_migrations/0001_initial.py
|
1
|
10131
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FilerFileItem'
db.create_table(u'contentitem_file_filerfileitem', (
(u'contentitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fluent_contents.ContentItem'], unique=True, primary_key=True)),
('file', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['filer.File'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('target', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),
))
db.send_create_signal(u'file', ['FilerFileItem'])
def backwards(self, orm):
# Deleting model 'FilerFileItem'
db.delete_table(u'contentitem_file_filerfileitem')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'file.filerfileitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'FilerFileItem', 'db_table': "u'contentitem_file_filerfileitem'", '_ormbases': ['fluent_contents.ContentItem']},
u'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['filer.File']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'})
},
u'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_filer.file_set+'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'filer.folder': {
'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'parent', u'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'fluent_contents.contentitem': {
'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'ContentItem'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '15', 'db_index': 'True'}),
'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentitems'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['fluent_contents.Placeholder']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_fluent_contents.contentitem_set+'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
},
'fluent_contents.placeholder': {
'Meta': {'unique_together': "(('parent_type', 'parent_id', 'slot'),)", 'object_name': 'Placeholder'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'m'", 'max_length': '1'}),
'slot': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['file']
|
apache-2.0
| -6,681,037,321,661,970,000
| 82.04918
| 210
| 0.566183
| false
| 3.562236
| false
| false
| false
|
WilliamDiakite/ExperimentationsACA
|
processing/lsa.py
|
1
|
3364
|
import os
import sys
import itertools
import operator
import nltk
import numpy as np
import matplotlib.pyplot as plt
from nltk.util import ngrams
from collections import Counter
from spell_checker import SpellChecker
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
sys.path.insert(0, '/Users/diakite_w/Documents/Dev/ExperimentationsACA/FrenchLefffLemmatizer')
from FrenchLefffLemmatizer import FrenchLefffLemmatizer
def extract_ngrams(documents, n):
'''
Return list of n-grams
'''
chained_documents = list(itertools.chain.from_iterable(documents))
return Counter(ngrams(chained_documents, n))
def tokenize(text):
fll = FrenchLefffLemmatizer()
splck = SpellChecker()
contracted_pronouns = ["l'", "m'", "n'", "d'", "c'", "j'", "qu'", "s'"]
dictionnary = []
stopwords = [w.rstrip() for w in open('stopwords-fr.txt')]
# Put everything to lower case
text = text.lower()
# Tokenize text
tokens = nltk.tokenize.word_tokenize(text)
    print('Number of tokens in the text:', len(tokens))
#tokens = [splck.correct(t) if t not in dictionnary else t for t in tokens]
    # Remove contracted pronouns from tokens; each pronoun ends with an
    # apostrophe, so strip everything up to and including it
    tokens = [t[t.index("'") + 1:] if any(t.startswith(p) for p in contracted_pronouns)
              else t for t in tokens]
tokens = [t for t in tokens if len(t) > 2]
tokens = [t for t in tokens if t not in stopwords]
tokens = [fll.lemmatize(t) for t in tokens]
    print('Number of tokens after processing:', len(tokens), '\n')
return tokens
def tokens_to_vec(tokens):
vec = np.zeros(len(word_index_map))
for token in tokens:
idx = word_index_map[token]
vec[idx] = 1
return vec
def read_txt(textfile):
with open(textfile, 'r') as f:
text = f.read()
text = text.replace('\n', ' ')
text = text.replace('- ', '')
text = text.replace('.', '')
text = text.replace('-', '')
text = text.replace("‘l'", 'ï')
return text
def get_all_doc(directory):
'''
    Read all txt documents and return them as a list of strings
'''
documents = []
counter = 1
for filename in os.listdir(directory):
if filename.endswith('.txt'):
print('\n[...] Reading document', counter)
filename = 'data/' + filename
documents.append(read_txt(filename))
counter += 1
return documents
documents = get_all_doc('data/')
all_tokens = [tokenize(doc) for doc in documents]
vocabulary = list(set(itertools.chain.from_iterable(all_tokens)))
print ('\nVocab size:', len(vocabulary))
# Computing n-grams
bigrams = extract_ngrams(all_tokens, 2)
trigrams = extract_ngrams(all_tokens, 3)
for t in trigrams.most_common(5):
    print(t)
print('\n')
for t in bigrams.most_common(10):
    print(t)
'''
# Key: word - value: index
word_index_map = {j: i for i, j in enumerate(vocabulary)}
# Key: index - value: word
index_word_map = sorted(word_index_map.items(), key=operator.itemgetter(1))
index_word_map = [t[0] for t in index_word_map]
N = len(documents)
D = len(word_index_map)
X = np.zeros((D,N))
i = 0
for tokens in all_tokens:
X[:,i] = tokens_to_vec(tokens)
i += 1
print(X.shape)
svd = TruncatedSVD()
Z = svd.fit_transform(X)
print('Z shape', Z.shape)
plt.scatter(Z[:,0], Z[:,1])
print('D:', D)
for i in range(D):
plt.annotate(s=index_word_map[i], xy=(Z[i,0], Z[i,1]))
plt.show()
'''
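# --- Illustrative sketch, not part of the original script: extract_ngrams()
# on toy data, independent of the 'data/' corpus used above.
#
#     docs = [['le', 'chat', 'dort'], ['le', 'chat', 'mange']]
#     extract_ngrams(docs, 2).most_common(1)   # -> [(('le', 'chat'), 2)]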
|
mit
| -8,408,729,182,994,486,000
| 20.96732
| 94
| 0.664683
| false
| 2.995544
| false
| false
| false
|
babble/babble
|
include/jython/Lib/asyncore.py
|
1
|
17033
|
# -*- Mode: Python -*-
# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""Basic infrastructure for asynchronous socket service clients and servers.
There are only two ways to have a program on a single processor do "more
than one thing at a time". Multi-threaded programming is the simplest and
most popular way to do it, but there is another very different technique,
that lets you have nearly all the advantages of multi-threading, without
actually using multiple threads. It's really only practical if your program
is largely I/O bound. If your program is CPU bound, then pre-emptive
scheduled threads are probably what you really need. Network servers are
rarely CPU-bound, however.
If your operating system supports the select() system call in its I/O
library (and nearly all do), then you can use it to juggle multiple
communication channels at once; doing other work while your I/O is taking
place in the "background." Although this strategy can seem strange and
complex, especially at first, it is in many ways easier to understand and
control than multi-threaded programming. The module documented here solves
many of the difficult problems for you, making the task of building
sophisticated high-performance network servers and clients a snap.
"""
import exceptions
import select
import socket
import sys
import time
import os
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, \
ENOTCONN, ESHUTDOWN, EINTR, EISCONN
try:
socket_map
except NameError:
socket_map = {}
class ExitNow(exceptions.Exception):
pass
def read(obj):
try:
obj.handle_read_event()
except ExitNow:
raise
except:
obj.handle_error()
def write(obj):
try:
obj.handle_write_event()
except ExitNow:
raise
except:
obj.handle_error()
def readwrite(obj, flags):
try:
if flags & select.POLLIN:
obj.handle_read_event()
if flags & select.POLLOUT:
obj.handle_write_event()
except ExitNow:
raise
except:
obj.handle_error()
def poll(timeout=0.0, map=None):
if map is None:
map = socket_map
if map:
r = []; w = []; e = []
for fd, obj in map.items():
if obj.readable():
r.append(fd)
if obj.writable():
w.append(fd)
if [] == r == w == e:
time.sleep(timeout)
else:
try:
r, w, e = select.select(r, w, e, timeout)
except select.error, err:
if err[0] != EINTR:
raise
else:
return
for fd in r:
obj = map.get(fd)
if obj is None:
continue
read(obj)
for fd in w:
obj = map.get(fd)
if obj is None:
continue
write(obj)
def poll2(timeout=0.0, map=None):
import poll
if map is None:
map = socket_map
if timeout is not None:
# timeout is in milliseconds
timeout = int(timeout*1000)
if map:
l = []
for fd, obj in map.items():
flags = 0
if obj.readable():
flags = poll.POLLIN
if obj.writable():
flags = flags | poll.POLLOUT
if flags:
l.append((fd, flags))
r = poll.poll(l, timeout)
for fd, flags in r:
obj = map.get(fd)
if obj is None:
continue
readwrite(obj, flags)
def poll3(timeout=0.0, map=None):
# Use the poll() support added to the select module in Python 2.0
if map is None:
map = socket_map
if timeout is not None:
# timeout is in milliseconds
timeout = int(timeout*1000)
pollster = select.poll()
if map:
for fd, obj in map.items():
flags = 0
if obj.readable():
flags = select.POLLIN
if obj.writable():
flags = flags | select.POLLOUT
if flags:
pollster.register(fd, flags)
try:
r = pollster.poll(timeout)
except select.error, err:
if err[0] != EINTR:
raise
r = []
for fd, flags in r:
obj = map.get(fd)
if obj is None:
continue
readwrite(obj, flags)
def loop(timeout=30.0, use_poll=0, map=None):
if map is None:
map = socket_map
if use_poll:
if hasattr(select, 'poll'):
poll_fun = poll3
else:
poll_fun = poll2
else:
poll_fun = poll
while map:
poll_fun(timeout, map)
class dispatcher:
debug = 0
connected = 0
accepting = 0
closing = 0
addr = None
def __init__(self, sock=None, map=None):
if sock:
self.set_socket(sock, map)
# I think it should inherit this anyway
self.socket.setblocking(0)
self.connected = 1
# XXX Does the constructor require that the socket passed
# be connected?
try:
self.addr = sock.getpeername()
except socket.error:
# The addr isn't crucial
pass
else:
self.socket = None
def __repr__(self):
status = [self.__class__.__module__+"."+self.__class__.__name__]
if self.accepting and self.addr:
status.append('listening')
elif self.connected:
status.append('connected')
if self.addr is not None:
try:
status.append('%s:%d' % self.addr)
except TypeError:
status.append(repr(self.addr))
# On some systems (RH10) id() can be a negative number.
# work around this.
MAX = 2L*sys.maxint+1
return '<%s at %#x>' % (' '.join(status), id(self)&MAX)
def add_channel(self, map=None):
#self.log_info('adding channel %s' % self)
if map is None:
if hasattr(self, '_map'):
map = self._map
del self._map
else:
map = socket_map
if not hasattr(self, '_fileno'):
self._fileno = self.socket.fileno()
map[self._fileno] = self
def del_channel(self, map=None):
fd = self._fileno
if map is None:
map = socket_map
if map.has_key(fd):
#self.log_info('closing channel %d:%s' % (fd, self))
del map[fd]
def create_socket(self, family, type):
self.family_and_type = family, type
self.socket = socket.socket(family, type)
self.socket.setblocking(0)
def set_socket(self, sock, map=None):
self.socket = sock
## self.__dict__['socket'] = sock
if sock.fileno():
self.add_channel(map)
else:
self._map = map
def set_reuse_addr(self):
# try to re-use a server port if possible
try:
self.socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR,
self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR) | 1
)
except socket.error:
pass
# ==================================================
# predicates for select()
# these are used as filters for the lists of sockets
# to pass to select().
# ==================================================
def readable(self):
return True
if os.name == 'mac':
# The macintosh will select a listening socket for
# write if you let it. What might this mean?
def writable(self):
return not self.accepting
else:
def writable(self):
return True
# ==================================================
# socket object methods.
# ==================================================
def listen(self, num):
self.accepting = 1
if os.name == 'nt' and num > 5:
num = 1
ret = self.socket.listen(num)
self.add_channel()
return ret
def bind(self, addr):
self.addr = addr
return self.socket.bind(addr)
def connect(self, address):
self.connected = 0
err = self.socket.connect_ex(address)
# XXX Should interpret Winsock return values
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK):
return
if err in (0, EISCONN):
self.add_channel()
self.addr = address
self.connected = 1
self.handle_connect()
else:
raise socket.error, err
def accept(self):
# XXX can return either an address pair or None
try:
conn, addr = self.socket.accept()
self.add_channel()
return conn, addr
except socket.error, why:
if why[0] == EWOULDBLOCK:
pass
else:
raise socket.error, why
def send(self, data):
try:
result = self.socket.send(data)
return result
except socket.error, why:
if why[0] == EWOULDBLOCK:
return 0
else:
raise socket.error, why
return 0
def recv(self, buffer_size):
try:
data = self.socket.recv(buffer_size)
if not data:
# a closed connection is indicated by signaling
# a read condition, and having recv() return 0.
self.handle_close()
return ''
else:
return data
except socket.error, why:
# winsock sometimes throws ENOTCONN
if why[0] in [ECONNRESET, ENOTCONN, ESHUTDOWN]:
self.handle_close()
return ''
else:
raise socket.error, why
def close(self):
self.del_channel()
self.socket.close()
# cheap inheritance, used to pass all other attribute
# references to the underlying socket object.
def __getattr__(self, attr):
return getattr(self.socket, attr)
# log and log_info may be overridden to provide more sophisticated
# logging and warning methods. In general, log is for 'hit' logging
# and 'log_info' is for informational, warning and error logging.
def log(self, message):
sys.stderr.write('log: %s\n' % str(message))
def log_info(self, message, type='info'):
if __debug__ or type != 'info':
print '%s: %s' % (type, message)
def handle_read_event(self):
if self.accepting:
# for an accepting socket, getting a read implies
# that we are connected
if not self.connected:
self.connected = 1
self.handle_accept()
elif not self.connected:
self.handle_connect()
self.connected = 1
self.handle_read()
else:
self.handle_read()
def handle_write_event(self):
# getting a write implies that we are connected
if not self.connected:
self.handle_connect()
self.connected = 1
self.handle_write()
def handle_expt_event(self):
self.handle_expt()
def handle_error(self):
nil, t, v, tbinfo = compact_traceback()
# sometimes a user repr method will crash.
try:
self_repr = repr(self)
except:
self_repr = '<__repr__(self) failed for object at %0x>' % id(self)
self.log_info(
'uncaptured python exception, closing channel %s (%s:%s %s)' % (
self_repr,
t,
v,
tbinfo
),
'error'
)
self.close()
def handle_expt(self):
self.log_info('unhandled exception', 'warning')
def handle_read(self):
self.log_info('unhandled read event', 'warning')
def handle_write(self):
self.log_info('unhandled write event', 'warning')
def handle_connect(self):
self.log_info('unhandled connect event', 'warning')
def handle_accept(self):
self.log_info('unhandled accept event', 'warning')
def handle_close(self):
self.log_info('unhandled close event', 'warning')
self.close()
# ---------------------------------------------------------------------------
# adds simple buffered output capability, useful for simple clients.
# [for more sophisticated usage use asynchat.async_chat]
# ---------------------------------------------------------------------------
class dispatcher_with_send(dispatcher):
def __init__(self, sock=None):
dispatcher.__init__(self, sock)
self.out_buffer = ''
def initiate_send(self):
num_sent = 0
num_sent = dispatcher.send(self, self.out_buffer[:512])
self.out_buffer = self.out_buffer[num_sent:]
def handle_write(self):
self.initiate_send()
def writable(self):
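        # While unconnected, report writable so select() fires a write event
        # when the non-blocking connect() completes.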
return (not self.connected) or len(self.out_buffer)
def send(self, data):
if self.debug:
self.log_info('sending %s' % repr(data))
self.out_buffer = self.out_buffer + data
self.initiate_send()
# ---------------------------------------------------------------------------
# used for debugging.
# ---------------------------------------------------------------------------
def compact_traceback():
t, v, tb = sys.exc_info()
tbinfo = []
assert tb # Must have a traceback
while tb:
tbinfo.append((
tb.tb_frame.f_code.co_filename,
tb.tb_frame.f_code.co_name,
str(tb.tb_lineno)
))
tb = tb.tb_next
# just to be safe
del tb
file, function, line = tbinfo[-1]
info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])
return (file, function, line), t, v, info
def close_all(map=None):
if map is None:
map = socket_map
for x in map.values():
x.socket.close()
map.clear()
# Asynchronous File I/O:
#
# After a little research (reading man pages on various unixen, and
# digging through the linux kernel), I've determined that select()
# isn't meant for doing asynchronous file i/o.
# Heartening, though - reading linux/mm/filemap.c shows that linux
# supports asynchronous read-ahead. So _MOST_ of the time, the data
# will be sitting in memory for us already when we go to read it.
#
# What other OS's (besides NT) support async file i/o? [VMS?]
#
# Regardless, this is useful for pipes, and stdin/stdout...
if os.name == 'posix':
import fcntl
class file_wrapper:
# here we override just enough to make a file
# look like a socket for the purposes of asyncore.
def __init__(self, fd):
self.fd = fd
def recv(self, *args):
return os.read(self.fd, *args)
def send(self, *args):
return os.write(self.fd, *args)
read = recv
write = send
def close(self):
return os.close(self.fd)
def fileno(self):
return self.fd
class file_dispatcher(dispatcher):
def __init__(self, fd):
dispatcher.__init__(self)
self.connected = 1
# set it to non-blocking mode
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
self.set_file(fd)
def set_file(self, fd):
self._fileno = fd
self.socket = file_wrapper(fd)
self.add_channel()
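    # Example (sketch, not part of the upstream module): draining one message
    # from a pipe with file_dispatcher.  Assumes the module-level loop()
    # helper defined earlier in this file; call _example_pipe_reader() by
    # hand to try it out.
    def _example_pipe_reader():
        r, w = os.pipe()
        os.write(w, 'ping')
        os.close(w)
        class pipe_reader(file_dispatcher):
            def handle_read(self):
                # read whatever is buffered in the pipe, then leave the map
                print 'got:', self.recv(512)
                self.close()
        pipe_reader(r)
        loop()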
|
apache-2.0
| 6,117,678,477,258,131,000
| 29.361854
| 78
| 0.540422
| false
| 4.155404
| false
| false
| false
|
pjh/vm-analyze
|
analyze/ip_to_fn.py
|
1
|
21352
|
# Virtual memory analysis scripts.
# Developed 2012-2014 by Peter Hornyack, pjh@cs.washington.edu
# Copyright (c) 2012-2014 Peter Hornyack and University of Washington
# This file contains methods that implement a wrapper around the
# binutils "addr2line" utility, which can be used to look up instruction
# pointer values in executable files and shared object files to find
# the function (and sometimes the source code file + line number) that
# contains the ip.
# Note that each instance of "addr2line -e /path/to/binary..." will load
# that entire binary into memory while it runs; this is annoying for
# enormous binaries like firefox's libxul.so.
from util.pjh_utils import *
from analyze.vm_mapping_class import UNKNOWN_FN
import fcntl
import os
import shlex
import subprocess
import sys
import time
cache_addr2line_lookups = True
# With caching disabled, less memory will be consumed, but it will take
# 14 minutes to analyze the function lookups of a firefox trace. With
# caching enabled, the analysis only takes 2 minutes.
addr2line_prog = '/usr/bin/addr2line'
file_prog = '/usr/bin/file'
linux_code_startaddr = int("0x400000", 16)
# On x86_64 Linux anyway, all non-relocatable executables are loaded
# into virtual address space at this address, I believe.
# Given the filename of an executable file or a shared object file,
# determines if the file is relocatable. All shared object files should
# be relocatable, and most executable files are non-relocatable, but it
# is possible to build "position independent executables" (see the "-fpic"
# and "-pie" flags in gcc(1)).
#
# This method is intended to be used when determining function names
# from instruction pointers using addr2line: if the file is relocatable,
# then an absolute ip should have the address of the file's memory mapping
# subtracted from it before passing it to addr2line. If the file is not
# relocatable, then the absolute ip can be passed directly to addr2line.
# Note that this method must create a child subprocess to check the file,
# so try not to call it too often.
#
# Returns: True/False if object file is relocatable or not, or None if an
# error occurred.
def is_objfile_relocatable(name):
tag = 'is_objfile_relocatable'
global file_prog
# Command line that I think makes sense:
# file -e apptype -e ascii -e encoding -e tokens -e cdf -e elf -e tar
# -bn <filename>
# This should return one of the following strings, indicating that the
# file is relocatable or not:
# ELF 64-bit LSB shared object, x86-64, version 1 (SYSV)
# ELF 64-bit LSB executable, x86-64, version 1 (SYSV)
# (even position-independent executables will be described as "shared
# object").
filecmd = ("{} -e apptype -e ascii -e encoding -e tokens -e cdf "
"-e elf -e tar -bn {}").format(file_prog, name)
# don't use -p flag, so that output will *always* have two lines
fileargs = shlex.split(filecmd)
print_debug(tag, ("fileargs: {}").format(fileargs))
p = subprocess.Popen(fileargs, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if not p:
print_error(tag, "Popen failed for command {}".format(filecmd))
return None
# communicate() will wait for the process to terminate and will
# read its output. A "timeout" arg was added for Python 3.3, but
# verbena is only running 3.2.3 right now, so hope that the process
# will always terminate.
(out, err) = p.communicate()
#retcode = p.poll() # unnecessary, I think
#retcode = p.wait() # unnecessary, I think
retcode = p.returncode
if retcode is None:
print_error(tag, ("unexpected: got a None retcode - subprocess "
"has not terminated yet?!").format())
return None
elif retcode != 0:
print_error(tag, ("file command returned a non-zero error code: "
"{}").format(retcode))
return None
if out:
# Convert from bytes back to string:
out = out.decode('utf-8').strip()
else:
print_error(tag, "got no output from file subprocess")
return None
if err:
err = err.decode('utf-8').strip()
else:
err = ''
print_debug(tag, ("call to file subprocess succeeded, got stdout "
"{} and stderr {}").format(out, err))
# It's probably not necessary to define the expected output strings
# so strictly here, but this will cause an error if we ever e.g.
# move to a different architecture, at which point we can double-
# check this code to make sure it makes sense for non-x86-64.
# Ah, I already found one thing that's not consistent: some files
# are "version 1 (SYSV)", others are "version 1 (GNU/Linux)".
reloc_str = 'ELF 64-bit LSB shared object, x86-64, version 1'
nonreloc_str = 'ELF 64-bit LSB executable, x86-64, version 1'
if reloc_str in out:
print_debug(tag, ("relocatable: {}").format(reloc_str))
return True
elif nonreloc_str in out:
print_debug(tag, ("nonrelocatable: {}").format(nonreloc_str))
return False
print_error(tag, ("unexpected output \"{}\", doesn't match "
"expected output from file command").format(out))
print_error(tag, ("output: {}").format(repr(out)))
print_error(tag, ("reloc_str: {}").format(repr(reloc_str)))
print_error(tag, ("nonreloc_str: {}").format(repr(nonreloc_str)))
return None
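# Example (sketch): expected answers on a 64-bit Linux box with hypothetical
# paths; a distribution that builds its tools as position-independent
# executables would report True for /bin/ls as well.
#   is_objfile_relocatable('/usr/lib/x86_64-linux-gnu/libc.so.6')  # -> True
#   is_objfile_relocatable('/bin/ls')                              # -> False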
##############################################################################
# Creates an addr2line instance (subprocess) for a particular code module
# (executable file or shared object file).
# This class probably shouldn't be used directly; use the ip_to_fn_converter
# class below instead.
class addr2line_module:
tag = 'addr2line_module'
# Members:
objname = None
relocatable = None
a2l = None # Popen class instance representing an addr2line subprocess
cache = None
def __init__(self, objname):
tag = "{}.__init__".format(self.tag)
if not objname:
print_error_exit(tag, "must provide an object name")
self.objname = objname
self.tag = "addr2line_module-{}".format(objname)
self.relocatable = is_objfile_relocatable(objname)
if self.relocatable is None:
#print_error_exit(tag, ("is_objfile_relocatable() returned "
# "error, not sure how to handle gracefully inside of "
# "this constructor so aborting.").format())
print_error(tag, ("is_objfile_relocatable() returned "
"error, not sure how to handle gracefully inside of "
"this constructor so aborting...").format())
return None
elif self.relocatable is True:
print_debug(tag, ("determined that object file {} is "
"relocatable, will subtract vma_start_addr from ips "
"passed to this addr2line_module").format(objname))
else:
print_debug(tag, ("determined that object file {} is "
"not relocatable, will use absolute ips that are passed "
"to this addr2line_module").format(objname))
ret = self.start_addr2line()
if ret != 0:
print_error_exit(tag, ("failed to start addr2line "
"subprocess").format())
self.cache = dict()
return
# Returns: the fn corresponding to this ip if it is found in the
# cache map, or None if not found.
def cache_lookup(self, ip):
tag = "{}.cache_lookup".format(self.tag)
try:
fn = self.cache[ip]
except KeyError:
return None
return fn
# Inserts the specified ip, fn pair into the addr2line "cache" for
# this module.
# "Cache" isn't quite the right term, as nothing is ever evicted;
# it's just a dictionary...
def cache_insert(self, ip, fn):
tag = "{}.cache_insert".format(self.tag)
try:
fn = self.cache[ip]
print_error_exit(tag, ("unexpected: already a cache entry "
"for ip {} -> {}").format(hex(ip), fn))
except KeyError:
self.cache[ip] = fn
print_debug(tag, ("cache insert {} -> {}").format(hex(ip), fn))
return
# Passes the specified ip to addr2line and returns the function that
# it corresponds to, if found.
# ip should be a base-10 integer!
# Returns: the function name if addr2line was able to lookup the ip
# successfully, or '' if addr2line was unsuccessful. Returns None
# on error.
def ip_to_fn(self, ip, vma_start_addr):
tag = "{}.ip_to_fn".format(self.tag)
global linux_code_startaddr
global cache_addr2line_lookups
if not self.a2l:
print_debug(tag, ("self.a2l is None, addr2line subprocess "
"is already terminated (or was never started)").format())
return None
if type(ip) != int:
print_error(tag, ("ip argument {} is not an int").format(ip))
return None
if vma_start_addr is None or type(vma_start_addr) != int:
print_error(tag, ("invalid vma_start_addr: {}").format(
vma_start_addr))
return None
# For relocatable object files, we must subtract the vma start
# addr (the address where the file was mapped into the process'
# address space) from the ip, which is assumed to be an absolute
# ip from an execution's userstacktrace. For non-relocatable
# executables, we directly use the absolute ip.
if self.relocatable:
#print_debug(tag, ("file {} is relocatable, so subtracting "
# "vma_start_addr {} from absolute ip {} to get ip for "
# "addr2line function lookup: {}").format(self.objname,
# hex(vma_start_addr), hex(ip), hex(ip - vma_start_addr)))
if vma_start_addr > ip:
print_error_exit(tag, ("unexpected: vma_start_addr {} "
"> ip {}").format(hex(vma_start_addr), hex(ip)))
ip -= vma_start_addr
else:
#print_debug(tag, ("file {} is not relocatable, so directly "
# "using absolute ip {} and ignoring vma_start_addr "
# "{}").format(self.objname, hex(ip), hex(vma_start_addr)))
if vma_start_addr != linux_code_startaddr:
print_error_exit(tag, ("file is non-relocatable, but "
"its start addr {} doesn't match expected value for "
"64-bit Linux, {} - is this expected?").format(
hex(vma_start_addr), hex(linux_code_startaddr)))
# See if we've already looked up this ip for this module.
# Important: this must come after the ip is offset for relocatable
# modules; ip must not change between now and when it is inserted
# into the cache below.
if cache_addr2line_lookups:
cache_lookup_ip = ip # for sanity checking
fn = self.cache_lookup(ip)
if fn:
print_debug(tag, ("cache hit: ip {} -> fn '{}'").format(
hex(ip), fn))
else:
print_debug(tag, ("cache miss: ip {}").format(hex(ip)))
# Communicate with addr2line process if cache lookups are disabled
# or the cache lookup just missed.
if not cache_addr2line_lookups or fn is None:
# Stupidly, it appears that Python's subprocess module can't
# be used to communicate multiple times with an interactive
# subprocess.
# http://docs.python.org/3/library/subprocess.html#subprocess.Popen.communicate
# http://stackoverflow.com/questions/3065060/communicate-multiple-times-with-a-process-without-breaking-the-pipe
# http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
# http://stackoverflow.com/questions/11457931/running-an-interactive-command-from-within-python
# It appears that the subprocess' stdin and stdout can just be
# written and read directly instead. It appears that the input
# string written to stdin must be converted to bytes first, and
# then any output read from stdout must be converted from a byte
# string back to a standard str as well.
#print_debug(tag, ("addr2line: lookup ip {} in object file "
# "{}").format(hex(ip), self.objname))
ip_input = """{}
""".format(hex(ip))
# send Enter keypress: to enter in vim insert mode, hit
# Ctrl-v first
self.a2l.stdin.write(bytearray(ip_input, 'utf-8'))
#print_debug(tag, "a2l.stdin.write returned")
# Read the output from addr2line:
# http://docs.python.org/3/tutorial/inputoutput.html#methods-of-file-objects
# If self.a2l.stdout.readline() is used to read lines of output
# here, then after reading all of the lines, the next call to
# readline() will block forever. A possible workaround is to
# always just call readline() exactly twice, since that's what
# we expect addr2line's output to be, but this seems fragile.
# Instead, can we just call read(), which will read "the entire
# contents of the file"? This will block as well, since there
# is no EOF at the end of the output. According to some stack
# overflow answer for providing non-blocking reads in Python,
# we may be able to use the fcntl module to mark file
# descriptors as non-blocking.
# http://stackoverflow.com/a/1810703/1230197
# This seems to work a little better, although now the problem
# is that after writing to stdin, the python script here will
# likely attempt to read stdout before addr2line has had a
# chance to write to it. The problem is that we want to block
# <a little bit>, but not forever...
# Fragile but working solution: keep reading until two newlines
# have been encountered, or until the process has terminated.
# As far as I can tell addr2line will always return two lines
# of output when started with the "-Cif" flags, even if
# gibberish input is provided.
# $ addr2line -e test-programs/hello-world -Cif
# 1234
# ??
# ??:0
# 0x4006d9
# _start
# ??:0
fd = self.a2l.stdout.fileno()
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
output = ""
linecount = 0
loopcount = 0
while linecount < 2:
# In practice, it looks like this loop may run one or more
# times (e.g. 41 times) without reading anything from
# self.a2l.stdout, but then when there is data available
# for reading, it is all available at once (both lines that
# we expect).
bytestr = self.a2l.stdout.read()
if bytestr and len(bytestr) > 0:
buf = bytestr.decode('utf-8')
output += buf
linecount = len(output.splitlines())
if False:
# When this code is enabled and stderr is set to
# subprocess.PIPE when self.a2l if Popen'd, it
# didn't seem to help - stderr.read() here never
# ever returns.
bytestrerr = self.a2l.stderr.read()
if bytestrerr and len(bytestrerr) > 0:
buf = bytestrerr.decode('utf-8')
output += buf
linecount = len(output.splitlines())
print_error_exit(tag, ("stderr.read(): output={}, "
"linecount={}").format(output, linecount))
print_error_exit(tag, ("BUMMER: this code was broken for "
"some reason after upgrading from Ubuntu 12.04 to 13.04 "
"(or something else broke it, but I'm not sure what); "
"perhaps due to python3 upgrade, or maybe a change to "
"addr2line. In the loop below, the stdout.read() never "
"actually returns anything, and we will just loop "
"here forever.").format())
loopcount += 1
if loopcount % 50000 == 0:
# Lookup time appears to depend on the size of the object
# file, which makes sense I guess; for a test lookup in
# my version of libc, I saw loopcount up to 10,000.
#print_debug(tag, ("loopcount is {}, checking if "
# "addr2line is still alive").format(loopcount))
self.a2l.poll()
if self.a2l.returncode:
print_error(tag, ("addr2line subprocess has "
"terminated with retcode {}, returning error "
"from this fn").format(self.a2l.returncode))
return None
else:
print_debug(tag, ("addr2line subprocess is still "
"alive, will keep looping; output buffer so far "
"is {}").format(output))
pass
            # Parse and cache only when addr2line was actually queried; on a
            # cache hit there is no output buffer to parse.
            lines = output.splitlines()
            # Ok, now, if addr2line was able to lookup the function name, it
            # should be found in the first line of output; if not, then it
            # should have printed "??".
            fn = lines[0].strip()
            if cache_addr2line_lookups:
                if ip != cache_lookup_ip:
                    print_error_exit(tag, ("cache_insert ip {} doesn't match "
                        "cache_lookup_ip {}").format(hex(ip),
                        hex(cache_lookup_ip)))
                self.cache_insert(ip, fn)
# This needs to happen for both the cache hit case and the
# just-looked-it-up case.
if '?' in fn:
#print_debug(tag, ("got unknown fn '{}' returned from addr2line, "
# "will return empty string from this fn").format(fn))
fn = ''
else:
#print_debug(tag, ("got fn '{}' from addr2line output {}").format(
# fn, output.replace('\n', '')))
pass
return fn
# The user should try to remember to call this function explicitly
# when done using the instance of the class, but if the user forgets,
# then the destructor (__del__) should eventually perform the same
# cleanup operations (i.e. terminating the addr2line process).
def close(self):
tag = "{}.close".format(self.tag)
self.stop_addr2line()
self.objname = None
self.relocatable = None
self.cache = None
return
# "private" method:
# Starts an instance of the addr2line program for converting ips into
# function names. Returns: 0 on success, -1 on error.
def start_addr2line(self):
tag = "{}.start_addr2line".format(self.tag)
global addr2line_prog
a2lcmd = ("{} -e {} -Cif").format(addr2line_prog, self.objname)
# don't use -p flag, so that output will *always* have two lines
a2largs = shlex.split(a2lcmd)
print_debug(tag, ("a2largs: {}").format(a2largs))
self.a2l = subprocess.Popen(a2largs, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
#stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if not self.a2l:
print_error(tag, "Popen failed for command {}".format(a2lcmd))
return -1
retcode = self.a2l.poll()
if retcode:
print_error(tag, ("addr2line subprocess already "
"terminated, this is unexpected").format())
retcode = self.a2l.wait()
self.a2l = None
return -1
print_debug(tag, ("started addr2line subprocess with pid "
"{}").format(self.a2l.pid))
return 0
# "private" method:
def stop_addr2line(self):
tag = "{}.stop_addr2line".format(self.tag)
if not self.a2l:
print_debug(tag, ("self.a2l is None, addr2line subprocess "
"is already terminated (or was never started)").format())
return
# http://docs.python.org/3/library/subprocess.html#subprocess.Popen.communicate
print_debug(tag, ("sending Ctrl-d to addr2line subprocess {} to "
"terminate it").format(self.a2l.pid))
stop_input = ''
# Ctrl-d: hit Ctrl-v first in vim insert mode to 'type' this
# special key
#(out, err) = self.a2l.communicate(input=stop_input)
(out, err) = self.a2l.communicate(
input=bytearray(stop_input, 'utf-8'))
# does stop_input need to be converted to bytes?? Docs appear to
# say so, but code examples don't...
if self.a2l.returncode is None:
print_error_exit(tag, ("communicate() returned, but returncode "
"is not set yet!").format())
elif self.a2l.returncode != 0:
print_warning(tag, ("terminated addr2line subprocess returned "
"error code {}").format(self.a2l.returncode))
else:
print_debug(tag, ("addr2line subprocess terminated "
"successfully").format())
self.a2l = None
return
def __del__(self):
tag = "{}.__del__".format(self.tag)
if self.a2l:
self.stop_addr2line()
return
##############################################################################
# Converts instruction pointers to function names.
# Uses one addr2line_module object per file that we perform lookups in.
class ip_to_fn_converter:
tag = 'ip_to_fn_converter'
# Members:
a2lmap = None
def __init__(self):
tag = "{}.__init__".format(self.tag)
self.a2lmap = dict()
return
# Attempts to lookup the specified instruction pointer in the specified
# file (executable file or shared object file). vma_start_addr should
# be the address (as an int) where the file was mapped into the address
# space when the ip was captured. If this address is unknown, then
# setting it to 0 will likely still work for non-relocatable executable
# files, but the lookup will likely fail (or worse, succeed incorrectly)
# for relocatable object files or position-independent executables.
# Returns: function name on success, empty string '' if the lookup
# failed, or None if there was an error.
def lookup(self, objname, ip, vma_start_addr):
tag = "{}.lookup".format(self.tag)
if (not objname or not ip or type(objname) != str or type(ip) != int
or len(objname) == 0 or vma_start_addr is None or
type(vma_start_addr) != int):
print_error(tag, ("invalid argument: objname {} must be a "
"non-empty string, ip {} must be an int, vma_start_addr "
"must be an int").format(objname, ip, vma_start_addr))
return None
# We keep one addr2line_module object per file:
try:
a2l = self.a2lmap[objname]
print_debug(tag, ("got an existing addr2line instance for "
"objname {}").format(objname))
except KeyError:
print_debug(tag, ("creating a new addr2line instance for "
"objname {}").format(objname))
a2l = addr2line_module(objname)
if not a2l:
print_error(tag, ("addr2line_module constructor "
"failed, just returning {}").format(UNKNOWN_FN))
return UNKNOWN_FN
self.a2lmap[objname] = a2l
return a2l.ip_to_fn(ip, vma_start_addr)
def close(self):
tag = "{}.close".format(self.tag)
for a2l in self.a2lmap.values():
a2l.close()
self.a2lmap = None
return
def __del__(self):
tag = "{}.__del__".format(self.tag)
if self.a2lmap:
self.close()
return
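# Example (sketch): driving the converter by hand.  The binary path,
# instruction pointer and mapping address below are hypothetical, and assume
# a non-PIE executable mapped at the usual 0x400000; call _example_lookup()
# manually to try it.
def _example_lookup():
    conv = ip_to_fn_converter()
    fn = conv.lookup('/bin/ls', int("0x402a00", 16), linux_code_startaddr)
    print('resolved fn: {!r}'.format(fn))
    conv.close()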
if __name__ == '__main__':
print("Cannot run stand-alone")
sys.exit(1)
|
bsd-3-clause
| 8,448,444,619,364,010,000
| 37.747731
| 117
| 0.678344
| false
| 3.225865
| true
| false
| false
|
lampertb/RPIAFIB
|
Software/afib_lib.py
|
1
|
4157
|
import sys
import numpy as np
from scipy import signal
# For testing
import csv
defaultWindowSize=120
defaultMinSNR=2
defaultNoisePercentage=10
defaultSampleRate=250
#The find peaks function takes in an array of data
#It returns an array of the peak locations after running the wavelet transform
def findPeaks(dataArray, windowSize=defaultWindowSize):
peakIndex=signal.find_peaks_cwt(dataArray, np.arange(1, windowSize), min_snr=defaultMinSNR, noise_perc=defaultNoisePercentage)
#print peakIndex
return peakIndex
#Calculate the time interval (in seconds) between successive peaks
def getRR(peakIndex, sampleRate=defaultSampleRate):
rr_data = []
for i in range(0, len(peakIndex)-1):
diff = peakIndex[i+1]-peakIndex[i]
#print "peak1 {0} - peak2 {1} Diff {2}".format(peakIndex[i+1], peakIndex[i], diff)
        timeDelay = diff/float(sampleRate) #Convert the peak-to-peak sample gap to seconds
rr_data.append(timeDelay)
#sum+=timeDelay #create an average
#print "Sum {0}, len {1}".format(sum, len(peakIndex))
return rr_data
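# Worked example (sketch): with the default 250 Hz sample rate, peaks at
# samples [10, 210, 420] give getRR -> [0.8, 0.84] seconds, i.e.
# (210-10)/250.0 and (420-210)/250.0.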
#AFib Detection Algorithm
from scipy.stats import norm
def Fib_Detection( x , seglen = 128):
N = len(x)
tprmean = 0.65625; tprvar = 0.001369222
# TPR mean and variance from rozinn database
    afstats = {
        'avg': [],
        'rmssd': [],
        'tpr': [],
        'se': [],
        'tprstat': [],
        'count': [],
    }
for i in range (0,N-seglen+1):
perc = i/N*100
j = 0
segment = x[i:i+seglen]
        #******************** Remove 16 outliers *******************************
#* In the outlier removal, 8 maximum and 8 minimum values are discarded
#***********************************************************************
segment_outlier = segment[:]
for j in range (0,8):
segment_outlier.remove(max(segment_outlier))
segment_outlier.remove(min(segment_outlier))
#print segment
#print segment_outlier
# Get mean
afstats['avg'].append(np.mean(segment))
# RMSSD
difference = np.subtract(segment_outlier[2:seglen-16], segment_outlier[1:seglen-17])
        # [-1] picks the value just appended for this segment
        afstats['rmssd'].append(np.sqrt(np.sum(np.power(difference, 2))/(seglen-17))/afstats['avg'][-1])
# TPR
j = 0
for k in range (1,seglen-1):
if ((segment[k]-segment[k-1])*(segment[k]-segment[k+1])>0):
j = j+1
afstats['tpr'].append(j/(seglen-2.0))
# Shannon Entropy
seg_max = np.max(segment_outlier)
seg_min = np.min(segment_outlier)
step = (seg_max-seg_min)/16.0;
entropy = 0;
if (step!=0):
group1 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for j in range(0,112):
z = int(np.around((segment_outlier[j]-seg_min)/step))
group1[z] = group1[z]+1
group1 = np.divide(group1,np.sum(group1)+0.0)
for j in range (0,16):
if (group1[j]>0):
entropy = entropy+group1[j]*np.log(group1[j])
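        # -2.7726 is -ln(16): dividing by it normalises the (negative) entropy
        # sum onto [0, 1], where 1 means a uniform spread over the 16 bins.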
afstats['se'].append(entropy/-2.7726)
# Compute the afstats
        afstats['tprstat'].append(norm.cdf(afstats['tpr'][-1], tprmean, np.sqrt(tprvar)))
        if(afstats['rmssd'][-1]>=0.1 and afstats['tprstat'][-1]>0.0001 and afstats['tprstat'][-1] <= 0.9999 and afstats['se'][-1] >=0.7):
afstats['count'].append(1)
else:
afstats['count'].append(0)
return afstats
#AFib Detection from ECG file
def afib_dect():
inputFile="0403_Normal_tiny.csv"
ECG=[]
with open(inputFile,'rb') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
ECG.append(float(row['ECG']))
data=np.asarray(ECG)
peakIndex=findPeaks(data, 200)
rr_data = getRR(peakIndex)
afstats = Fib_Detection(rr_data)
# Print result to result.txt file
outputFile = "result.txt"
result = "%d"%sum(afstats['count']);
fp = open(outputFile, 'r+')
fp.write(result);
fp.close()
return sum(afstats['count']) > 1
afib_dect();
|
mit
| 1,566,503,293,678,278,400
| 32.256
| 141
| 0.577099
| false
| 3.212519
| false
| false
| false
|
eubr-bigsea/tahiti
|
migrations/versions/38745782554d_adding_missing_port_interfaces.py
|
1
|
5671
|
# -*- coding: utf-8 -*-}
"""Adding missing port interfaces
Revision ID: 38745782554d
Revises: b2b823fe47b1
Create Date: 2017-06-07 15:16:30.224298
"""
from alembic import op
from sqlalchemy import Integer, String
from sqlalchemy.sql import table, column, text
# revision identifiers, used by Alembic.
revision = '38745782554d'
down_revision = 'b2b823fe47b1'
branch_labels = None
depends_on = None
data = [
(34, 5),
(55, 1),
(56, 1),
(57, 11),
(37, 2),
(37, 18),
# (46, 2),
# (46, 18),
(63, 1),
(64, 1),
(73, 19),
(100, 2),
(100, 19),
(161, 17)
]
def upgrade():
try:
op.execute(text('START TRANSACTION'))
insert_operation_port_interface()
insert_operation_port_interface_translation()
insert_operation_port_interface_operation_port()
insert_operation_platform()
insert_operation_translation()
except:
op.execute(text('ROLLBACK'))
raise
def insert_operation_translation():
tb = table(
'operation_translation',
column('id', Integer),
column('locale', String),
column('name', String),
column('description', String), )
columns = ('id', 'locale', 'name', 'description')
rows_data = [
(73, 'en', 'Regression Model', 'Regression Model'),
(73, 'pt', 'Modelo de Regressão', 'Modelo de Regressão'),
(74, 'en', 'Isotonic Regression', 'Isotonic Regression'),
(74, 'pt', 'Regressão Isotônica', 'Regressão Isotônica'),
(75, 'en', 'One Hot Encoder',
'One hot encoding transforms categorical '
'features to a format that works better with '
'classification and regression algorithms.'),
(75, 'pt', 'One Hot Encoder',
'One Hot encoding é uma transformação que fazemos nos '
'dados para representarmos uma variável categórica de '
'forma binária (indica presença ou ausência de um valor).'),
(76, 'en', 'AFT Survival Regression',
'Accelerated Failure Time (AFT) Model Survival Regression'),
(76, 'pt', 'Regressão AFT Survival',
'Accelerated Failure Time (AFT) Model Survival Regression'),
(77, 'en', 'GBT Regressor',
'Gradient-Boosted Trees (GBTs) learning algorithm for '
'regression. It supports both continuous and categorical featur'),
(77, 'pt', 'Regressor GBT',
'Gradient-Boosted Trees (GBTs) learning algorithm for '
'regression. It supports both continuous and categorical feature'),
(78, 'en', 'Random Forest Regressor',
'Random Forest learning algorithm for regression. '
'It supports both continuous and categorical features.'),
(78, 'pt', 'Regressor Random Forest',
'Random Forest learning algorithm for regression. '
'It supports both continuous and categorical features.'),
(79, 'en', 'Generalized Linear Regressor',
'Generalized Linear Regressor'),
(79, 'pt', 'Regressor Linear Generalizado',
'Regressor Linear Generalizado'),
]
rows = [dict(list(zip(columns, row))) for row in rows_data]
op.bulk_insert(tb, rows)
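# The zip/dict idiom above keys each positional tuple by column name, e.g.
# dict(zip(('id', 'locale', 'name'), (19, 'en', 'Visualization')))
# yields {'id': 19, 'locale': 'en', 'name': 'Visualization'}.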
def insert_operation_platform():
tb = table(
'operation_platform',
column('operation_id', Integer),
column('platform_id', Integer), )
columns = ('operation_id', 'platform_id')
rows_data = [
(73, 1),
(74, 1),
(75, 1),
(76, 1),
(77, 1),
(78, 1),
(79, 1),
]
rows = [dict(list(zip(columns, row))) for row in rows_data]
op.bulk_insert(tb, rows)
def insert_operation_port_interface():
tb = table(
'operation_port_interface',
column('id', Integer),
column('color', String), )
columns = ('id', 'color')
interface_data = [
(19, '#AACC22')
]
rows = [dict(list(zip(columns, row))) for row in interface_data]
op.bulk_insert(tb, rows)
def insert_operation_port_interface_translation():
tb = table(
'operation_port_interface_translation',
column('id', Integer),
column('locale', String),
column('name', String), )
columns = ('id', 'locale', 'name')
interface_data = [
(19, 'pt', 'Visualização'),
(19, 'en', 'Visualization'),
]
rows = [dict(list(zip(columns, row))) for row in interface_data]
op.bulk_insert(tb, rows)
def insert_operation_port_interface_operation_port():
tb = table(
'operation_port_interface_operation_port',
column('operation_port_id', Integer),
column('operation_port_interface_id', Integer), )
columns = ('operation_port_id', 'operation_port_interface_id')
rows = [dict(list(zip(columns, row))) for row in data]
op.bulk_insert(tb, rows)
def downgrade():
try:
for d in data:
op.execute(
text('DELETE FROM '
'operation_port_interface_operation_port '
'WHERE operation_port_id = {} '
' AND operation_port_interface_id = {}'.format(*d)))
op.execute(text('DELETE FROM operation_port_interface_translation '
'WHERE id = 19'))
op.execute(text('DELETE FROM operation_port_interface '
'WHERE id = 19'))
op.execute(text('DELETE FROM operation_platform '
'WHERE operation_id BETWEEN 73 AND 79'))
op.execute(text('DELETE FROM operation_translation '
'WHERE id BETWEEN 73 AND 79'))
except:
op.execute(text('ROLLBACK'))
raise
|
apache-2.0
| 7,238,215,253,415,339,000
| 28.915344
| 76
| 0.582773
| false
| 3.744371
| false
| false
| false
|
Barrog/C4-Datapack
|
data/jscript/quests/329_CuriosityOfDwarf/__init__.py
|
1
|
2487
|
# Made by Mr. - Version 0.3 by DrLecter
import sys
from net.sf.l2j.gameserver.model.quest import State
from net.sf.l2j.gameserver.model.quest import QuestState
from net.sf.l2j.gameserver.model.quest.jython import QuestJython as JQuest
GOLEM_HEARTSTONE = 1346
BROKEN_HEARTSTONE = 1365
ADENA = 57
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
htmltext = event
if event == "7437-03.htm" :
st.set("cond","1")
st.setState(STARTED)
st.playSound("ItemSound.quest_accept")
elif event == "7437-06.htm" :
st.exitQuest(1)
st.playSound("ItemSound.quest_finish")
return htmltext
 def onTalk (self,npc,st) :
npcId = npc.getNpcId()
htmltext = "<html><head><body>I have nothing to say you</body></html>"
id = st.getState()
if id == CREATED :
st.set("cond","0")
if int(st.get("cond"))==0 :
if st.getPlayer().getLevel() >= 33 :
htmltext = "7437-02.htm"
else:
htmltext = "7437-01.htm"
st.exitQuest(1)
else :
heart=st.getQuestItemsCount(GOLEM_HEARTSTONE)
broken=st.getQuestItemsCount(BROKEN_HEARTSTONE)
if broken+heart>0 :
st.giveItems(ADENA,50*broken+1000*heart)
st.takeItems(BROKEN_HEARTSTONE,-1)
st.takeItems(GOLEM_HEARTSTONE,-1)
htmltext = "7437-05.htm"
else:
htmltext = "7437-04.htm"
return htmltext
def onKill (self,npc,st):
npcId = npc.getNpcId()
n = st.getRandom(100)
if npcId == 85 :
if n<5 :
st.giveItems(GOLEM_HEARTSTONE,1)
st.playSound("ItemSound.quest_itemget")
elif n<58 :
st.giveItems(BROKEN_HEARTSTONE,1)
st.playSound("ItemSound.quest_itemget")
elif npcId == 83 :
if n<6 :
st.giveItems(GOLEM_HEARTSTONE,1)
st.playSound("ItemSound.quest_itemget")
elif n<56 :
st.giveItems(BROKEN_HEARTSTONE,1)
st.playSound("ItemSound.quest_itemget")
return
QUEST = Quest(329,"329_CuriosityOfDwarf","Curiosity Of Dwarf")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
COMPLETED = State('Completed', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(7437)
CREATED.addTalkId(7437)
STARTED.addTalkId(7437)
STARTED.addKillId(83)
STARTED.addKillId(85)
STARTED.addQuestDrop(85,BROKEN_HEARTSTONE,1)
STARTED.addQuestDrop(85,GOLEM_HEARTSTONE,1)
print "importing quests: 329: Curiosity Of Dwarf"
|
gpl-2.0
| 4,741,307,329,138,383,000
| 27.918605
| 74
| 0.650985
| false
| 2.729967
| false
| false
| false
|
skuda/client-python
|
kubernetes/client/models/v1beta1_role_ref.py
|
1
|
4581
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1RoleRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_group=None, kind=None, name=None):
"""
V1beta1RoleRef - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_group': 'str',
'kind': 'str',
'name': 'str'
}
self.attribute_map = {
'api_group': 'apiGroup',
'kind': 'kind',
'name': 'name'
}
self._api_group = api_group
self._kind = kind
self._name = name
@property
def api_group(self):
"""
Gets the api_group of this V1beta1RoleRef.
APIGroup is the group for the resource being referenced
:return: The api_group of this V1beta1RoleRef.
:rtype: str
"""
return self._api_group
@api_group.setter
def api_group(self, api_group):
"""
Sets the api_group of this V1beta1RoleRef.
APIGroup is the group for the resource being referenced
:param api_group: The api_group of this V1beta1RoleRef.
:type: str
"""
if api_group is None:
raise ValueError("Invalid value for `api_group`, must not be `None`")
self._api_group = api_group
@property
def kind(self):
"""
Gets the kind of this V1beta1RoleRef.
Kind is the type of resource being referenced
:return: The kind of this V1beta1RoleRef.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1RoleRef.
Kind is the type of resource being referenced
:param kind: The kind of this V1beta1RoleRef.
:type: str
"""
if kind is None:
raise ValueError("Invalid value for `kind`, must not be `None`")
self._kind = kind
@property
def name(self):
"""
Gets the name of this V1beta1RoleRef.
Name is the name of resource being referenced
:return: The name of this V1beta1RoleRef.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1beta1RoleRef.
Name is the name of resource being referenced
:param name: The name of this V1beta1RoleRef.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
apache-2.0
| -5,183,765,010,237,596,000
| 25.633721
| 105
| 0.531107
| false
| 4.237743
| false
| false
| false
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/multiprocessing/queues.py
|
1
|
9842
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: queues.py
__all__ = [
'Queue', 'SimpleQueue', 'JoinableQueue']
import sys
import os
import threading
import collections
import time
import atexit
import weakref
from Queue import Empty, Full
import _multiprocessing
from multiprocessing import Pipe
from multiprocessing.synchronize import Lock, BoundedSemaphore, Semaphore, Condition
from multiprocessing.util import debug, info, Finalize, register_after_fork
from multiprocessing.forking import assert_spawning
class Queue(object):
def __init__(self, maxsize=0):
if maxsize <= 0:
maxsize = _multiprocessing.SemLock.SEM_VALUE_MAX
self._maxsize = maxsize
self._reader, self._writer = Pipe(duplex=False)
self._rlock = Lock()
self._opid = os.getpid()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = Lock()
self._sem = BoundedSemaphore(maxsize)
self._after_fork()
if sys.platform != 'win32':
register_after_fork(self, Queue._after_fork)
return
def __getstate__(self):
assert_spawning(self)
return (
self._maxsize, self._reader, self._writer,
self._rlock, self._wlock, self._sem, self._opid)
def __setstate__(self, state):
self._maxsize, self._reader, self._writer, self._rlock, self._wlock, self._sem, self._opid = state
self._after_fork()
def _after_fork(self):
debug('Queue._after_fork()')
self._notempty = threading.Condition(threading.Lock())
self._buffer = collections.deque()
self._thread = None
self._jointhread = None
self._joincancelled = False
self._closed = False
self._close = None
self._send = self._writer.send
self._recv = self._reader.recv
self._poll = self._reader.poll
return
def put(self, obj, block=True, timeout=None):
if not self._sem.acquire(block, timeout):
raise Full
self._notempty.acquire()
try:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._notempty.notify()
finally:
self._notempty.release()
return
def get(self, block=True, timeout=None):
if block and timeout is None:
self._rlock.acquire()
try:
res = self._recv()
self._sem.release()
return res
finally:
self._rlock.release()
else:
if block:
deadline = time.time() + timeout
if not self._rlock.acquire(block, timeout):
raise Empty
try:
if not self._poll(block and deadline - time.time() or 0.0):
raise Empty
res = self._recv()
self._sem.release()
return res
finally:
self._rlock.release()
return
def qsize(self):
return self._maxsize - self._sem._semlock._get_value()
def empty(self):
return not self._poll()
def full(self):
return self._sem._semlock._is_zero()
def get_nowait(self):
return self.get(False)
def put_nowait(self, obj):
return self.put(obj, False)
def close(self):
self._closed = True
self._reader.close()
if self._close:
self._close()
def join_thread(self):
debug('Queue.join_thread()')
if self._jointhread:
self._jointhread()
def cancel_join_thread(self):
debug('Queue.cancel_join_thread()')
self._joincancelled = True
try:
self._jointhread.cancel()
except AttributeError:
pass
def _start_thread(self):
debug('Queue._start_thread()')
self._buffer.clear()
self._thread = threading.Thread(target=Queue._feed, args=(
self._buffer, self._notempty, self._send,
self._wlock, self._writer.close), name='QueueFeederThread')
self._thread.daemon = True
debug('doing self._thread.start()')
self._thread.start()
debug('... done self._thread.start()')
created_by_this_process = self._opid == os.getpid()
if not self._joincancelled and not created_by_this_process:
self._jointhread = Finalize(self._thread, Queue._finalize_join, [
weakref.ref(self._thread)], exitpriority=-5)
self._close = Finalize(self, Queue._finalize_close, [
self._buffer, self._notempty], exitpriority=10)
@staticmethod
def _finalize_join(twr):
debug('joining queue thread')
thread = twr()
if thread is not None:
thread.join()
debug('... queue thread joined')
else:
debug('... queue thread already dead')
return
@staticmethod
def _finalize_close(buffer, notempty):
debug('telling queue thread to quit')
notempty.acquire()
try:
buffer.append(_sentinel)
notempty.notify()
finally:
notempty.release()
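    # The feeder thread started by _start_thread() runs _feed() below: it
    # drains the in-process buffer into the pipe so put() itself never blocks
    # on the pipe, and it exits once it pops the _sentinel object.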
@staticmethod
def _feed(buffer, notempty, send, writelock, close):
debug('starting thread to feed data to pipe')
from .util import is_exiting
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
bpopleft = buffer.popleft
sentinel = _sentinel
if sys.platform != 'win32':
wacquire = writelock.acquire
wrelease = writelock.release
else:
wacquire = None
try:
while 1:
nacquire()
try:
if not buffer:
nwait()
finally:
nrelease()
try:
while 1:
obj = bpopleft()
if obj is sentinel:
debug('feeder thread got sentinel -- exiting')
close()
return
if wacquire is None:
send(obj)
else:
wacquire()
try:
send(obj)
finally:
wrelease()
except IndexError:
pass
except Exception as e:
try:
if is_exiting():
info('error in queue thread: %s', e)
else:
import traceback
traceback.print_exc()
except Exception:
pass
return
_sentinel = object()
class JoinableQueue(Queue):
def __init__(self, maxsize=0):
Queue.__init__(self, maxsize)
self._unfinished_tasks = Semaphore(0)
self._cond = Condition()
def __getstate__(self):
return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
def __setstate__(self, state):
Queue.__setstate__(self, state[:-2])
self._cond, self._unfinished_tasks = state[-2:]
def put(self, obj, block=True, timeout=None):
if not self._sem.acquire(block, timeout):
raise Full
self._notempty.acquire()
self._cond.acquire()
try:
if self._thread is None:
self._start_thread()
self._buffer.append(obj)
self._unfinished_tasks.release()
self._notempty.notify()
finally:
self._cond.release()
self._notempty.release()
return
def task_done(self):
self._cond.acquire()
try:
if not self._unfinished_tasks.acquire(False):
raise ValueError('task_done() called too many times')
if self._unfinished_tasks._semlock._is_zero():
self._cond.notify_all()
finally:
self._cond.release()
def join(self):
self._cond.acquire()
try:
if not self._unfinished_tasks._semlock._is_zero():
self._cond.wait()
finally:
self._cond.release()
class SimpleQueue(object):
def __init__(self):
self._reader, self._writer = Pipe(duplex=False)
self._rlock = Lock()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = Lock()
self._make_methods()
return
def empty(self):
return not self._reader.poll()
def __getstate__(self):
assert_spawning(self)
return (
self._reader, self._writer, self._rlock, self._wlock)
def __setstate__(self, state):
self._reader, self._writer, self._rlock, self._wlock = state
self._make_methods()
def _make_methods(self):
recv = self._reader.recv
racquire, rrelease = self._rlock.acquire, self._rlock.release
def get():
racquire()
try:
return recv()
finally:
rrelease()
self.get = get
if self._wlock is None:
self.put = self._writer.send
else:
send = self._writer.send
wacquire, wrelease = self._wlock.acquire, self._wlock.release
def put(obj):
wacquire()
try:
return send(obj)
finally:
wrelease()
self.put = put
return
|
unlicense
| 1,957,727,825,150,317,600
| 28.47006
| 106
| 0.517578
| false
| 4.356795
| false
| false
| false
|
HackerEarth/django-allauth
|
allauth/socialaccount/providers/twitter/views.py
|
1
|
1820
|
from django.utils import simplejson
from allauth.socialaccount.providers.oauth.client import OAuth
from allauth.socialaccount.providers.oauth.views import (OAuthAdapter,
OAuthLoginView,
OAuthCallbackView)
from allauth.socialaccount.models import SocialLogin, SocialAccount
from allauth.utils import get_user_model
from provider import TwitterProvider
User = get_user_model()
class TwitterAPI(OAuth):
"""
Verifying twitter credentials
"""
url = 'https://api.twitter.com/1.1/account/verify_credentials.json'
def get_user_info(self):
user = simplejson.loads(self.query(self.url))
return user
class TwitterOAuthAdapter(OAuthAdapter):
provider_id = TwitterProvider.id
request_token_url = 'https://api.twitter.com/oauth/request_token'
access_token_url = 'https://api.twitter.com/oauth/access_token'
# Issue #42 -- this one authenticates over and over again...
# authorize_url = 'https://api.twitter.com/oauth/authorize'
authorize_url = 'https://api.twitter.com/oauth/authenticate'
def complete_login(self, request, app, token):
client = TwitterAPI(request, app.key, app.secret,
self.request_token_url)
extra_data = client.get_user_info()
uid = extra_data['id']
user = User(username=extra_data['screen_name'])
account = SocialAccount(user=user,
uid=uid,
provider=TwitterProvider.id,
extra_data=extra_data)
return SocialLogin(account)
oauth_login = OAuthLoginView.adapter_view(TwitterOAuthAdapter)
oauth_callback = OAuthCallbackView.adapter_view(TwitterOAuthAdapter)
|
mit
| 985,280,656,928,123,300
| 36.916667
| 75
| 0.636813
| false
| 4.193548
| false
| false
| false
|
skibaa/smart-sweeper
|
game/dbext.py
|
1
|
2811
|
import logging
from google.appengine.ext import db
from google.appengine.api import datastore_errors
import cPickle
logger=logging.getLogger("smartSweeper.dbext")
class PickledProperty(db.Property):
data_type = db.Blob
def __init__(self, force_type=None, *args, **kw):
self.force_type=force_type
super(PickledProperty, self).__init__(*args, **kw)
def validate(self, value):
value = super(PickledProperty, self).validate(value)
if value is not None and self.force_type and \
not isinstance(value, self.force_type):
raise datastore_errors.BadValueError(
'Property %s must be of type "%s".' % (self.name,
self.force_type))
return value
def get_value_for_datastore(self, model_instance):
value = self.__get__(model_instance, model_instance.__class__)
if value is not None:
return db.Text(cPickle.dumps(value))
def make_value_from_datastore(self, value):
if value is not None:
return cPickle.loads(str(value))
class CachedReferenceProperty(db.ReferenceProperty):
def __property_config__(self, model_class, property_name):
super(CachedReferenceProperty, self).__property_config__(model_class,
property_name)
#Just carelessly override what super made
setattr(self.reference_class,
self.collection_name,
_CachedReverseReferenceProperty(model_class, property_name,
self.collection_name))
class _CachedReverseReferenceProperty(db._ReverseReferenceProperty):
def __init__(self, model, prop, collection_name):
super(_CachedReverseReferenceProperty, self).__init__(model, prop)
self.__prop=prop
self.__collection_name = collection_name
def __get__(self, model_instance, model_class):
if model_instance is None:
return self
logger.debug("cached reverse trying")
if self.__collection_name in model_instance.__dict__:# why does it get here at all?
return model_instance.__dict__[self.__collection_name]
logger.info("cached reverse miss %s",self.__collection_name)
query=super(_CachedReverseReferenceProperty, self).__get__(model_instance,
model_class)
#replace the attribute on the instance
res=[]
for c in query:
resolved_name='_RESOLVED_'+self.__prop #WARNING: using internal
setattr(c, resolved_name, model_instance)
res += [c]
model_instance.__dict__[self.__collection_name]=res
return res
def __delete__ (self, model_instance):
if model_instance is not None:
del model_instance.__dict__[self.__collection_name]
|
apache-2.0
| -9,060,400,630,525,390,000
| 37.506849
| 91
| 0.626467
| false
| 4.291603
| false
| false
| false
|
allynt/tings
|
T/tings/views/api/views_api_users.py
|
1
|
1138
|
from rest_framework import generics, permissions
from django.contrib.auth.models import User
# from T.tings.models.models_users import TUserProfile
from T.tings.serializers.serializers_users import TUserSerializer
class TUserPermission(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# anybody can submit GET, HEAD or OPTIONS requests...
if request.method in permissions.SAFE_METHODS:
return True
# only the admin or collection owners can submit PUT, POST, or DELETE requests...
user = request.user
return user.is_superuser or user == obj
class TUserList(generics.ListCreateAPIView):
queryset = User.objects.all()
serializer_class = TUserSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, TUserPermission,)
class TUserDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = User.objects.all()
serializer_class = TUserSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, TUserPermission,)
|
mit
| 8,404,409,644,057,363,000
| 33.484848
| 89
| 0.742531
| false
| 4.428016
| false
| false
| false
|
akx/gentry
|
gore/api/handlers/store.py
|
1
|
1862
|
import base64
import json
import logging
import zlib
from datetime import datetime
from django.conf import settings
from django.db import transaction
from django.http import JsonResponse
from django.utils.encoding import force_str
from django.utils.timezone import make_aware
from pytz import UTC
from gore.auth import validate_auth_header
from gore.excs import InvalidAuth
from gore.models import Event
from gore.signals import event_received
from gore.utils.event_grouper import group_event
logger = logging.getLogger(__name__)
def store_event(request, project):
try:
auth_header = validate_auth_header(request, project)
except InvalidAuth as ia:
return JsonResponse({'error': str(ia)}, status=401)
body = request.body
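    # Raven clients send either raw deflate (flagged via Content-Encoding) or,
    # on protocol version 5, zlib data wrapped in base64.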
if request.META.get('HTTP_CONTENT_ENCODING') == 'deflate':
body = zlib.decompress(body)
elif auth_header.get('sentry_version') == '5': # Support older versions of Raven
body = zlib.decompress(base64.b64decode(body)).decode('utf8')
body = json.loads(force_str(body))
timestamp = make_aware(datetime.fromtimestamp(float(auth_header['sentry_timestamp'])), timezone=UTC)
with transaction.atomic():
event = Event.objects.create_from_raven(project_id=project, body=body, timestamp=timestamp)
try:
with transaction.atomic():
group = group_event(event.project, event)
group.archived = False
group.cache_values()
group.save()
except: # pragma: no cover
logger.warning('event with ID %s could not be grouped' % event.id, exc_info=True)
try:
event_received.send(sender=event)
except: # pragma: no cover
logger.warning('event_received signal handling failed', exc_info=True)
if settings.DEBUG:
raise
return JsonResponse({'id': event.id}, status=201)
|
mit
| 4,651,106,081,188,272,000
| 33.481481
| 104
| 0.6971
| false
| 3.936575
| false
| false
| false
|
django-stars/dash2011
|
presence/apps/shout/views.py
|
1
|
2081
|
import logging
import json
from django.shortcuts import render_to_response
from django.http import Http404
from django.template import RequestContext
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import HttpResponseForbidden
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from shout.models import Shout
from shout.forms import ShoutForm
logger = logging.getLogger("presence.%s" % __name__)
@login_required
def shout_new(request):
if request.method == "POST":
form = ShoutForm(request.POST)
if form.is_valid():
shout = form.save(user=request.user)
logger.info('New %s shout from "%s"' % (('public', 'private')[shout.is_private], shout.user.username))
if request.is_ajax():
return HttpResponse(json.dumps({'response': 'OK'}), mimetype='application/json')
return HttpResponseRedirect(reverse('shout-list'))
else:
if request.is_ajax():
return HttpResponse(json.dumps({'response': 'ERR', 'reason': 'Shout text is required!'}), mimetype='application/json')
else:
form = ShoutForm()
data = {
'form': form,
}
return render_to_response('shout/new.html', data, RequestContext(request))
@login_required
def shout_list(request):
    #custom manager to get non-private shouts, plus private ones that belong to the user
shouts = Shout.objects.filter_for_user(user=request.user)
data = {
'shouts': shouts,
}
return render_to_response('shout/list.html', data, RequestContext(request))
@login_required
def shout_detail(request, shout_id):
try:
shout = Shout.objects.get_for_user(user=request.user, id=shout_id)
except Shout.DoesNotExist:
raise Http404
data = {
'shout': shout,
}
return render_to_response('shout/detail.html', data, RequestContext(request))
|
bsd-3-clause
| 7,593,957,745,665,859,000
| 31.515625
| 134
| 0.682845
| false
| 3.882463
| false
| false
| false
|
JaviMerino/lisa
|
libs/utils/analysis/frequency_analysis.py
|
1
|
24894
|
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Frequency Analysis Module """
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import operator
from trappy.utils import listify
from devlib.utils.misc import memoized
from collections import namedtuple
from analysis_module import AnalysisModule
# Configure logging
import logging
NON_IDLE_STATE = 4294967295
ResidencyTime = namedtuple('ResidencyTime', ['total', 'active'])
ResidencyData = namedtuple('ResidencyData', ['label', 'residency'])
class FrequencyAnalysis(AnalysisModule):
"""
Support for plotting Frequency Analysis data
:param trace: input Trace object
:type trace: :mod:`libs.utils.Trace`
"""
def __init__(self, trace):
super(FrequencyAnalysis, self).__init__(trace)
###############################################################################
# DataFrame Getter Methods
###############################################################################
def _dfg_cpu_frequency_residency(self, cpu, total=True):
"""
Get per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency.
:param cpu: CPU ID
:type cpu: int
:param total: if true returns the "total" time, otherwise the "active"
time is returned
:type total: bool
:returns: :mod:`pandas.DataFrame` - "total" or "active" time residency
at each frequency.
"""
residency = self._getCPUFrequencyResidency(cpu)
if not residency:
return None
if total:
return residency.total
return residency.active
def _dfg_cluster_frequency_residency(self, cluster, total=True):
"""
Get per-Cluster frequency residency, i.e. amount of time CLUSTER
`cluster` spent at each frequency.
:param cluster: this can be either a single CPU ID or a list of CPU IDs
belonging to a cluster or the cluster name as specified in the
platform description
:type cluster: str or int or list(int)
:param total: if true returns the "total" time, otherwise the "active"
time is returned
:type total: bool
:returns: :mod:`pandas.DataFrame` - "total" or "active" time residency
at each frequency.
"""
residency = self._getClusterFrequencyResidency(cluster)
if not residency:
return None
if total:
return residency.total
return residency.active
###############################################################################
# Plotting Methods
###############################################################################
def plotClusterFrequencies(self, title='Clusters Frequencies'):
"""
Plot frequency trend for all clusters. If sched_overutilized events are
available, the plots will also show the intervals of time where the
cluster was overutilized.
:param title: user-defined plot title
:type title: str
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
df = self._dfg_trace_event('cpu_frequency')
pd.options.mode.chained_assignment = None
# Extract LITTLE and big clusters frequencies
# and scale them to [MHz]
if len(self._platform['clusters']['little']):
lfreq = df[df.cpu == self._platform['clusters']['little'][-1]]
lfreq['frequency'] = lfreq['frequency']/1e3
else:
lfreq = []
if len(self._platform['clusters']['big']):
bfreq = df[df.cpu == self._platform['clusters']['big'][-1]]
bfreq['frequency'] = bfreq['frequency']/1e3
else:
bfreq = []
# Compute AVG frequency for LITTLE cluster
avg_lfreq = 0
if len(lfreq) > 0:
lfreq['timestamp'] = lfreq.index
lfreq['delta'] = (lfreq['timestamp'] - lfreq['timestamp'].shift()).fillna(0).shift(-1)
lfreq['cfreq'] = (lfreq['frequency'] * lfreq['delta']).fillna(0)
timespan = lfreq.iloc[-1].timestamp - lfreq.iloc[0].timestamp
avg_lfreq = lfreq['cfreq'].sum()/timespan
# Compute AVG frequency for big cluster
avg_bfreq = 0
if len(bfreq) > 0:
bfreq['timestamp'] = bfreq.index
bfreq['delta'] = (bfreq['timestamp'] - bfreq['timestamp'].shift()).fillna(0).shift(-1)
bfreq['cfreq'] = (bfreq['frequency'] * bfreq['delta']).fillna(0)
timespan = bfreq.iloc[-1].timestamp - bfreq.iloc[0].timestamp
avg_bfreq = bfreq['cfreq'].sum()/timespan
pd.options.mode.chained_assignment = 'warn'
# Setup a dual cluster plot
fig, pltaxes = plt.subplots(2, 1, figsize=(16, 8))
plt.suptitle(title, y=.97, fontsize=16, horizontalalignment='center')
# Plot Cluster frequencies
axes = pltaxes[0]
axes.set_title('big Cluster')
if avg_bfreq > 0:
axes.axhline(avg_bfreq, color='r', linestyle='--', linewidth=2)
axes.set_ylim(
(self._platform['freqs']['big'][0] - 100000)/1e3,
(self._platform['freqs']['big'][-1] + 100000)/1e3
)
if len(bfreq) > 0:
bfreq['frequency'].plot(style=['r-'], ax=axes,
drawstyle='steps-post', alpha=0.4)
else:
logging.warn('NO big CPUs frequency events to plot')
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.set_ylabel('MHz')
axes.grid(True)
axes.set_xticklabels([])
axes.set_xlabel('')
self._trace.analysis.status.plotOverutilized(axes)
axes = pltaxes[1]
axes.set_title('LITTLE Cluster')
if avg_lfreq > 0:
axes.axhline(avg_lfreq, color='b', linestyle='--', linewidth=2)
axes.set_ylim(
(self._platform['freqs']['little'][0] - 100000)/1e3,
(self._platform['freqs']['little'][-1] + 100000)/1e3
)
if len(lfreq) > 0:
lfreq['frequency'].plot(style=['b-'], ax=axes,
drawstyle='steps-post', alpha=0.4)
else:
logging.warn('NO LITTLE CPUs frequency events to plot')
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.set_ylabel('MHz')
axes.grid(True)
self._trace.analysis.status.plotOverutilized(axes)
# Save generated plots into datadir
figname = '{}/{}cluster_freqs.png'\
.format(self._trace.plots_dir, self._trace.plots_prefix)
pl.savefig(figname, bbox_inches='tight')
logging.info('LITTLE cluster average frequency: %.3f GHz',
avg_lfreq/1e3)
logging.info('big cluster average frequency: %.3f GHz',
avg_bfreq/1e3)
return (avg_lfreq/1e3, avg_bfreq/1e3)
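# Worked example (illustrative, not part of the original source): the averages
# above are time-weighted, not simple sample means. A cluster at 500 MHz for
# 3 s and then 1000 MHz for 1 s averages (500*3 + 1000*1) / (3+1) = 625 MHz,
# which is exactly what the cfreq = frequency * delta accumulation divided by
# the timespan computes.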
def plotCPUFrequencyResidency(self, cpus=None, pct=False, active=False):
"""
Plot per-CPU frequency residency. big CPUs are plotted first and then
LITTLEs.
Requires the following trace events:
- cpu_frequency
- cpu_idle
:param cpus: List of cpus. By default plot all CPUs
:type cpus: list(int)
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, plot DISABLED!')
return
if cpus is None:
# Generate plots only for available CPUs
cpufreq_data = self._dfg_trace_event('cpu_frequency')
_cpus = range(cpufreq_data.cpu.max()+1)
else:
_cpus = listify(cpus)
# Split between big and LITTLE CPUs ordered from higher to lower ID
_cpus.reverse()
big_cpus = [c for c in _cpus if c in self._platform['clusters']['big']]
little_cpus = [c for c in _cpus if c in
self._platform['clusters']['little']]
_cpus = big_cpus + little_cpus
# Precompute active and total time for each CPU
residencies = []
xmax = 0.0
for cpu in _cpus:
res = self._getCPUFrequencyResidency(cpu)
residencies.append(ResidencyData('CPU{}'.format(cpu), res))
max_time = res.total.max().values[0]
if xmax < max_time:
xmax = max_time
self._plotFrequencyResidency(residencies, 'cpu', xmax, pct, active)
def plotClusterFrequencyResidency(self, clusters=None,
pct=False, active=False):
"""
Plot the frequency residency in a given cluster, i.e. the amount of
time cluster `cluster` spent at frequency `f_i`. By default, both 'big'
and 'LITTLE' clusters data are plotted.
Requires the following trace events:
- cpu_frequency
- cpu_idle
:param clusters: name of the clusters to be plotted (all of them by
default)
:type clusters: str or list(str)
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, plot DISABLED!')
return
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, plot DISABLED!')
return
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU
if not self._trace.freq_coherency:
logging.warn('Cluster frequency is not coherent, plot DISABLED!')
return
# Sanitize clusters
if clusters is None:
_clusters = self._platform['clusters'].keys()
else:
_clusters = listify(clusters)
# Precompute active and total time for each cluster
residencies = []
xmax = 0.0
for cluster in _clusters:
res = self._getClusterFrequencyResidency(
self._platform['clusters'][cluster.lower()])
residencies.append(ResidencyData('{} Cluster'.format(cluster),
res))
max_time = res.total.max().values[0]
if xmax < max_time:
xmax = max_time
self._plotFrequencyResidency(residencies, 'cluster', xmax, pct, active)
###############################################################################
# Utility Methods
###############################################################################
@memoized
def _getCPUActiveSignal(self, cpu):
"""
Build a square wave representing the active (i.e. non-idle) CPU time,
i.e.:
cpu_active[t] == 1 if the CPU is reported to be non-idle at time t
cpu_active[t] == 0 otherwise
:param cpu: CPU ID
:type cpu: int
"""
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, '
'cannot compute CPU active signal!')
return None
idle_df = self._dfg_trace_event('cpu_idle')
cpu_df = idle_df[idle_df.cpu_id == cpu]
cpu_active = cpu_df.state.apply(
lambda s: 1 if s == NON_IDLE_STATE else 0
)
start_time = 0.0
if not self._trace.ftrace.normalized_time:
start_time = self._trace.ftrace.basetime
if cpu_active.index[0] != start_time:
entry_0 = pd.Series(cpu_active.iloc[0] ^ 1, index=[start_time])
cpu_active = pd.concat([entry_0, cpu_active])
return cpu_active
@memoized
def _getClusterActiveSignal(self, cluster):
"""
Build a square wave representing the active (i.e. non-idle) cluster
time, i.e.:
cluster_active[t] == 1 if at least one CPU is reported to be
non-idle by CPUFreq at time t
cluster_active[t] == 0 otherwise
:param cluster: list of CPU IDs belonging to a cluster
:type cluster: list(int)
"""
cpu_active = {}
for cpu in cluster:
cpu_active[cpu] = self._getCPUActiveSignal(cpu)
active = pd.DataFrame(cpu_active)
active.fillna(method='ffill', inplace=True)
# Cluster active is the OR between the actives on each CPU
# belonging to that specific cluster
cluster_active = reduce(
operator.or_,
[cpu_active.astype(int) for _, cpu_active in
active.iteritems()]
)
return cluster_active
@memoized
def _getClusterFrequencyResidency(self, cluster):
"""
Get a DataFrame with per cluster frequency residency, i.e. amount of
time spent at a given frequency in each cluster.
:param cluster: this can be either a single CPU ID or a list of CPU IDs
belonging to a cluster or the cluster name as specified in the
platform description
:type cluster: str or int or list(int)
:returns: namedtuple(ResidencyTime) - tuple of total and active time
dataframes
:raises: KeyError
"""
if not self._trace.hasEvents('cpu_frequency'):
logging.warn('Events [cpu_frequency] not found, '
'frequency residency computation not possible!')
return None
if not self._trace.hasEvents('cpu_idle'):
logging.warn('Events [cpu_idle] not found, '
'frequency residency computation not possible!')
return None
if isinstance(cluster, str):
try:
_cluster = self._platform['clusters'][cluster.lower()]
except KeyError:
logging.warn('%s cluster not found!', cluster)
return None
else:
_cluster = listify(cluster)
freq_df = self._dfg_trace_event('cpu_frequency')
# Assumption: all CPUs in a cluster run at the same frequency, i.e. the
# frequency is scaled per-cluster not per-CPU. Hence, we can limit the
# cluster frequencies data to a single CPU. This assumption is verified
# by the Trace module when parsing the trace.
if len(_cluster) > 1 and not self._trace.freq_coherency:
logging.warn('Cluster frequency is NOT coherent,'
'cannot compute residency!')
return None
cluster_freqs = freq_df[freq_df.cpu == _cluster[0]]
# Compute TOTAL Time
time_intervals = cluster_freqs.index[1:] - cluster_freqs.index[:-1]
total_time = pd.DataFrame({
'time': time_intervals,
'frequency': [f/1000.0 for f in cluster_freqs.iloc[:-1].frequency]
})
total_time = total_time.groupby(['frequency']).sum()
# Compute ACTIVE Time
cluster_active = self._getClusterActiveSignal(_cluster)
# In order to compute the active time spent at each frequency we
# multiply 2 square waves:
# - cluster_active, a square wave of the form:
# cluster_active[t] == 1 if at least one CPU is reported to be
# non-idle by CPUFreq at time t
# cluster_active[t] == 0 otherwise
# - freq_active, square wave of the form:
# freq_active[t] == 1 if at time t the frequency is f
# freq_active[t] == 0 otherwise
available_freqs = sorted(cluster_freqs.frequency.unique())
new_idx = sorted(cluster_freqs.index.tolist() +
cluster_active.index.tolist())
cluster_freqs = cluster_freqs.reindex(new_idx, method='ffill')
cluster_active = cluster_active.reindex(new_idx, method='ffill')
nonidle_time = []
for f in available_freqs:
freq_active = cluster_freqs.frequency.apply(
lambda x: 1 if x == f else 0
)
active_t = cluster_active * freq_active
# Compute total time by integrating the square wave
nonidle_time.append(self._trace.integrate_square_wave(active_t))
active_time = pd.DataFrame({'time': nonidle_time},
index=[f/1000.0 for f in available_freqs])
active_time.index.name = 'frequency'
return ResidencyTime(total_time, active_time)
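# Illustrative example (not part of the original source) of the square-wave
# product used above -- with both signals sampled on the merged index:
#   cluster_active: 1 1 0 0 1
#   freq_active:    1 0 0 1 1   (1 wherever frequency == f)
#   product:        1 0 0 0 1
# The product is non-zero only while the cluster is both active and at
# frequency f; integrate_square_wave() then turns it into a duration.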
def _getCPUFrequencyResidency(self, cpu):
"""
Get a DataFrame with per-CPU frequency residency, i.e. amount of
time CPU `cpu` spent at each frequency. Both total and active times
will be computed.
:param cpu: CPU ID
:type cpu: int
:returns: namedtuple(ResidencyTime) - tuple of total and active time
dataframes
"""
return self._getClusterFrequencyResidency(cpu)
def _plotFrequencyResidencyAbs(self, axes, residency, n_plots,
is_first, is_last, xmax, title=''):
"""
Private method to generate frequency residency plots.
:param axes: axes over which to generate the plot
:type axes: matplotlib.axes.Axes
:param residency: tuple of total and active time dataframes
:type residency: namedtuple(ResidencyTime)
:param n_plots: total number of plots
:type n_plots: int
:param is_first: if True this is the first plot
:type is_first: bool
:param is_last: if True this is the last plot
:type is_last: bool
:param xmax: x-axes higher bound
:type xmax: double
:param title: title of this subplot
:type title: str
"""
yrange = 0.4 * max(6, len(residency.total)) * n_plots
residency.total.plot.barh(ax=axes, color='g',
legend=False, figsize=(16, yrange))
residency.active.plot.barh(ax=axes, color='r',
legend=False, figsize=(16, yrange))
axes.set_xlim(0, 1.05*xmax)
axes.set_ylabel('Frequency [MHz]')
axes.set_title(title)
axes.grid(True)
if is_last:
axes.set_xlabel('Time [s]')
else:
axes.set_xticklabels([])
if is_first:
# Put title on top of the figure. As of now there is no clean way
# to make the title appear always in the same position in the
# figure because figure heights may vary between different
# platforms (different number of OPPs). Hence, we use annotation
legend_y = axes.get_ylim()[1]
axes.annotate('OPP Residency Time', xy=(0, legend_y),
xytext=(-50, 45), textcoords='offset points',
fontsize=18)
axes.annotate('GREEN: Total', xy=(0, legend_y),
xytext=(-50, 25), textcoords='offset points',
color='g', fontsize=14)
axes.annotate('RED: Active', xy=(0, legend_y),
xytext=(50, 25), textcoords='offset points',
color='r', fontsize=14)
def _plotFrequencyResidencyPct(self, axes, residency_df, label,
n_plots, is_first, is_last, res_type):
"""
Private method to generate PERCENTAGE frequency residency plots.
:param axes: axes over which to generate the plot
:type axes: matplotlib.axes.Axes
:param residency_df: residency time dataframe
:type residency_df: :mod:`pandas.DataFrame`
:param label: label to be used for percentage residency dataframe
:type label: str
:param n_plots: total number of plots
:type n_plots: int
:param is_first: if True this is the first plot
:type is_first: bool
:param is_last: if True this is the last plot
:type is_last: bool
:param res_type: type of residency, either TOTAL or ACTIVE
:type res_type: str
"""
# Compute sum of the time intervals
duration = residency_df.time.sum()
residency_pct = pd.DataFrame(
{label: residency_df.time.apply(lambda x: x*100/duration)},
index=residency_df.index
)
yrange = 3 * n_plots
residency_pct.T.plot.barh(ax=axes, stacked=True, figsize=(16, yrange))
axes.legend(loc='lower center', ncol=7)
axes.set_xlim(0, 100)
axes.grid(True)
if is_last:
axes.set_xlabel('Residency [%]')
else:
axes.set_xticklabels([])
if is_first:
legend_y = axes.get_ylim()[1]
axes.annotate('OPP {} Residency Time'.format(res_type),
xy=(0, legend_y), xytext=(-50, 35),
textcoords='offset points', fontsize=18)
def _plotFrequencyResidency(self, residencies, entity_name, xmax,
pct, active):
"""
Generate Frequency residency plots for the given entities.
:param residencies: residency data to plot, one entry per CPU or cluster
:type residencies: list(namedtuple(ResidencyData)) - each tuple containing:
1) as first element, a label to be used as subplot title
2) as second element, a namedtuple(ResidencyTime)
:param entity_name: name of the entity ('cpu' or 'cluster') used in the
figure name
:type entity_name: str
:param xmax: upper bound of x-axes
:type xmax: double
:param pct: plot residencies in percentage
:type pct: bool
:param active: for percentage plot specify whether to plot active or
total time. Default is TOTAL time
:type active: bool
"""
n_plots = len(residencies)
gs = gridspec.GridSpec(n_plots, 1)
fig = plt.figure()
figtype = ""
for idx, data in enumerate(residencies):
if data.residency is None:
plt.close(fig)
return
axes = fig.add_subplot(gs[idx])
is_first = idx == 0
is_last = idx+1 == n_plots
if pct and active:
self._plotFrequencyResidencyPct(axes, data.residency.active,
data.label, n_plots,
is_first, is_last,
'ACTIVE')
figtype = "_pct_active"
continue
if pct:
self._plotFrequencyResidencyPct(axes, data.residency.total,
data.label, n_plots,
is_first, is_last,
'TOTAL')
figtype = "_pct_total"
continue
self._plotFrequencyResidencyAbs(axes, data.residency,
n_plots, is_first,
is_last, xmax,
title=data.label)
figname = '{}/{}{}_freq_residency{}.png'\
.format(self._trace.plots_dir,
self._trace.plots_prefix,
entity_name, figtype)
pl.savefig(figname, bbox_inches='tight')
# vim :set tabstop=4 shiftwidth=4 expandtab
|
apache-2.0
| -7,015,758,231,730,057,000
| 37.180982
| 98
| 0.555676
| false
| 4.189498
| false
| false
| false
|
cburmeister/flask-bones
|
app/commands.py
|
1
|
1163
|
from faker import Faker
import click
from app.database import db
from app.user.models import User
@click.option('--num_users', default=5, help='Number of users.')
def populate_db(num_users):
"""Populates the database with seed data."""
fake = Faker()
users = []
for _ in range(num_users):
users.append(
User(
username=fake.user_name(),
email=fake.email(),
password=fake.word() + fake.word(),
remote_addr=fake.ipv4()
)
)
users.append(
User(
username='cburmeister',
email='cburmeister@discogs.com',
password='test123',
remote_addr=fake.ipv4(),
active=True,
is_admin=True
)
)
for user in users:
db.session.add(user)
db.session.commit()
def create_db():
"""Creates the database."""
db.create_all()
def drop_db():
"""Drops the database."""
if click.confirm('Are you sure?', abort=True):
db.drop_all()
def recreate_db():
"""Same as running drop_db() and create_db()."""
drop_db()
create_db()
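# Illustrative wiring sketch (an assumption -- registration is not shown in
# this module): the bare @click.option above implies these functions are
# turned into CLI commands elsewhere, e.g. in the app factory:
#
#     def register_commands(app):
#         for cmd in (populate_db, create_db, drop_db, recreate_db):
#             app.cli.command()(cmd)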
|
mit
| -36,645,924,411,596,520
| 21.803922
| 64
| 0.536543
| false
| 3.788274
| false
| false
| false
|
rbuffat/pyidf
|
tests/test_shadingsite.py
|
1
|
2163
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.thermal_zones_and_surfaces import ShadingSite
log = logging.getLogger(__name__)
class TestShadingSite(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_shadingsite(self):
pyidf.validation_level = ValidationLevel.error
obj = ShadingSite()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_azimuth_angle = 180.0
obj.azimuth_angle = var_azimuth_angle
# real
var_tilt_angle = 90.0
obj.tilt_angle = var_tilt_angle
# real
var_starting_x_coordinate = 4.4
obj.starting_x_coordinate = var_starting_x_coordinate
# real
var_starting_y_coordinate = 5.5
obj.starting_y_coordinate = var_starting_y_coordinate
# real
var_starting_z_coordinate = 6.6
obj.starting_z_coordinate = var_starting_z_coordinate
# real
var_length = 7.7
obj.length = var_length
# real
var_height = 8.8
obj.height = var_height
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.shadingsites[0].name, var_name)
self.assertAlmostEqual(idf2.shadingsites[0].azimuth_angle, var_azimuth_angle)
self.assertAlmostEqual(idf2.shadingsites[0].tilt_angle, var_tilt_angle)
self.assertAlmostEqual(idf2.shadingsites[0].starting_x_coordinate, var_starting_x_coordinate)
self.assertAlmostEqual(idf2.shadingsites[0].starting_y_coordinate, var_starting_y_coordinate)
self.assertAlmostEqual(idf2.shadingsites[0].starting_z_coordinate, var_starting_z_coordinate)
self.assertAlmostEqual(idf2.shadingsites[0].length, var_length)
self.assertAlmostEqual(idf2.shadingsites[0].height, var_height)
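# Convenience entry point (an illustrative addition, not in the original
# module): lets this file be run directly as `python test_shadingsite.py`.
if __name__ == "__main__":
    unittest.main()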
|
apache-2.0
| -7,739,266,213,189,500,000
| 31.787879
| 101
| 0.645862
| false
| 3.581126
| false
| false
| false
|
mitmedialab/MediaCloud-Web-Tools
|
server/views/topics/topiccreate.py
|
1
|
3535
|
import logging
from flask import jsonify, request
import flask_login
import mediacloud.error
from server import app, mc
from server.auth import user_mediacloud_client
from server.util.request import form_fields_required, api_error_handler, json_error_response, arguments_required
from server.views.topics.topic import topic_summary
logger = logging.getLogger(__name__)
VERSION_1 = 1
COLLECTION_US_TOP_ONLINE = 58722749
@app.route('/api/topics/create', methods=['PUT'])
@flask_login.login_required
@form_fields_required('name', 'description', 'solr_seed_query', 'start_date', 'end_date')
@api_error_handler
def topic_create():
user_mc = user_mediacloud_client()
name = request.form['name']
description = request.form['description']
solr_seed_query = request.form['solr_seed_query']
start_date = request.form['start_date']
end_date = request.form['end_date']
optional_args = {
'max_iterations': request.form['max_iterations'] if 'max_iterations' in request.form and request.form['max_iterations'] != 'null' else None,
'max_stories': request.form['max_stories'] if 'max_stories' in request.form and request.form['max_stories'] != 'null' else flask_login.current_user.profile['limits']['max_topic_stories'],
}
try:
topic_result = user_mc.topicCreate(name=name, description=description, solr_seed_query=solr_seed_query,
start_date=start_date, end_date=end_date,
media_tags_ids=[COLLECTION_US_TOP_ONLINE], # HACK: can't save without one of these in place (for now)
**optional_args,
)['topics'][0]
topics_id = topic_result['topics_id']
logger.info("Created new topic \"{}\" as {}".format(name, topics_id))
# if this includes any of the US-centric collections, add the retweet partisanship subtopic by default
# client will either make an empty snapshot, or a spidering one
return topic_summary(topics_id)
except mediacloud.error.MCException as e:
logging.error("Topic creation failed {}".format(name))
logging.exception(e)
return json_error_response(e.message, e.status_code)
except Exception as e:
logging.error("Topic creation failed {}".format(name))
logging.exception(e)
return json_error_response(str(e), 500)
@app.route('/api/topics/name-exists', methods=['GET'])
@flask_login.login_required
@arguments_required('searchStr')
@api_error_handler
def topic_name_exists():
# Check if topic with name exists already
# Have to do this in a separate method, instead of in topic_search, because we need to use an admin connection
# to media cloud to list all topics, but we don't want to return topics a user can't see to them.
# :return: boolean indicating whether a topic with this name exists (case-insensitive check)
search_str = request.args['searchStr']
topics_id = int(request.args['topicId']) if 'topicId' in request.args else None
matching_topics = mc.topicList(name=search_str, limit=15)
if topics_id:
matching_topic_names = [t['name'].lower().strip() for t in matching_topics['topics']
if t['topics_id'] != topics_id]
else:
matching_topic_names = [t['name'].lower().strip() for t in matching_topics['topics']]
name_in_use = search_str.lower() in matching_topic_names
return jsonify({'nameInUse': name_in_use})
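# Illustrative request/response for the route above (values hypothetical):
#
#   GET /api/topics/name-exists?searchStr=Climate%20Coverage&topicId=42
#   -> {"nameInUse": false}
#
# Passing topicId excludes the topic being edited from the comparison, so
# renaming a topic to its own current name is not reported as a collision.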
|
apache-2.0
| -6,057,934,450,072,376,000
| 48.788732
| 195
| 0.660255
| false
| 3.748674
| false
| false
| false
|
jeffsilverm/presentation
|
whats_new_in_python_3.6/type_hints_complicated.py
|
1
|
1308
|
#! /usr/bin/python3.6
# -*- coding: utf-8 -*-
import time
import sys
assert sys.version_info.major == 3 and sys.version_info.minor == 6, "Not running python 3.6, running {}".format(
sys.version_info)
class A(object):
def __init__(self, instance_mark) -> None:
self.instance_mark_A = instance_mark
def af_A(self, input):
return input * 2
def afo_A(self, input):
return input * 4
class AA(A):
def __init__(self, instance_marker) -> None:
super().__init__(instance_marker)  # bare super() alone was a no-op; actually run A.__init__
self.instance_marker = instance_marker
def aaf_AA(self, method_input):
return method_input * 20
def afo_A(self, method_input):
return method_input ** 2
class B(object):
def __init__(self):
pass
def bf_B(self, method_input):
return method_input * 9
a = A("marker a")
aa = AA("marker aa")
print("a.af_A(4) ", a.af_A(4))
print("a.afo_A(4) ", a.afo_A(4))
print("aa.aaf_AA(4) ", aa.aaf_AA(4))
print("aa.afo_A(4) ", aa.afo_A(4))
print("a.af_A('4') ", a.af_A('4'))
print("a.afo_A('4') ", a.afo_A('4'))
print("aa.aaf_AA('4') ", aa.aaf_AA('4'), flush=True)
try:
print("aa.afo_A('4') ", aa.afo_A('4'))
except TypeError as t:
time.sleep(1)
print("Exception TypeError was raised, as expected, when calling aa.afo_A('4'))", file=sys.stderr)
|
gpl-2.0
| 859,038,877,987,351,700
| 22.357143
| 112
| 0.58104
| false
| 2.730689
| false
| false
| false
|
Crowdcomputer/CC
|
crowdcomputer/init_db.py
|
1
|
1613
|
'''
Created on Nov 26, 2012
@author: stefanotranquillini
'''
from django.contrib.auth.models import User, Group
from rest_framework.authtoken.models import Token
from general.models import Application
from uuid import uuid4
def init():
initAppsAndCC()
def initAppsAndCC():
try:
user, c = User.objects.get_or_create(username='crowdcomputer',email="crowdcomputer@gmail.com",password="this.is.spam")
user.save()
print "%s %s"%(user.username,c)
app, c = Application.objects.get_or_create(name="crowdcomputer",url="http://www.crowdcomputer.org",user=user)
if c:
app.token=str(uuid4()).replace('-','')
app.save()
print "%s %s" %(app.name, app.token)
app, c = Application.objects.get_or_create(name="bpmn",url="http://www.crowdcomputer.org",user=user)
if c:
app.token=str(uuid4()).replace('-','')
print "%s %s" %(app.name, app.token)
app.save()
bpmn, c = Group.objects.get_or_create(name='bpmn')
bpmn.save()
except Exception, e:
print e
print 'exception'
def createAdmin(username,password,email):
try:
admin, c = User.objects.get_or_create(email=email)
if c:
admin.set_password(password)
admin.username=username
admin.is_superuser = True
admin.is_staff = True
admin.save()
print 'created'
else:
admin.set_password(password)
admin.save()
print 'updated'
except Exception:
print 'exception'
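# Illustrative usage (assumes a configured Django environment, e.g. via
# `python manage.py shell`; function names taken from above):
#
#     import init_db
#     init_db.init()
#     init_db.createAdmin('admin', 'change-me', 'admin@example.com')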
|
apache-2.0
| -2,273,612,954,416,833,300
| 28.345455
| 126
| 0.588965
| false
| 3.506522
| false
| false
| false
|
DailyActie/Surrogate-Model
|
01-codes/numpy-master/numpy/matrixlib/defmatrix.py
|
1
|
34262
|
from __future__ import division, absolute_import, print_function
__all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
import sys
import numpy.core.numeric as N
from numpy.core.numeric import concatenate, isscalar, binary_repr, identity, asanyarray
from numpy.core.numerictypes import issubdtype
# make translation table
_numchars = '0123456789.-+jeEL'
if sys.version_info[0] >= 3:
class _NumCharTable:
def __getitem__(self, i):
if chr(i) in _numchars:
return chr(i)
else:
return None
_table = _NumCharTable()
def _eval(astr):
str_ = astr.translate(_table)
if not str_:
raise TypeError("Invalid data string supplied: " + astr)
else:
return eval(str_)
else:
_table = [None] * 256
for k in range(256):
_table[k] = chr(k)
_table = ''.join(_table)
_todelete = []
for k in _table:
if k not in _numchars:
_todelete.append(k)
_todelete = ''.join(_todelete)
del k
def _eval(astr):
str_ = astr.translate(_table, _todelete)
if not str_:
raise TypeError("Invalid data string supplied: " + astr)
else:
return eval(str_)
def _convert_from_string(data):
rows = data.split(';')
newdata = []
count = 0
for row in rows:
trow = row.split(',')
newrow = []
for col in trow:
temp = col.split()
newrow.extend(map(_eval, temp))
if count == 0:
Ncols = len(newrow)
elif len(newrow) != Ncols:
raise ValueError("Rows not the same size.")
count += 1
newdata.append(newrow)
return newdata
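# Illustrative trace (not part of the original source): _convert_from_string
# turns "1 2; 3 4" into [[1, 2], [3, 4]] -- rows are split on ';', columns on
# ',' and/or whitespace, each token is evaluated by _eval, and rows of
# unequal length are rejected with "Rows not the same size."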
def asmatrix(data, dtype=None):
"""
Interpret the input as a matrix.
Unlike `matrix`, `asmatrix` does not make a copy if the input is already
a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``.
Parameters
----------
data : array_like
Input data.
dtype : data-type
Data-type of the output matrix.
Returns
-------
mat : matrix
`data` interpreted as a matrix.
Examples
--------
>>> x = np.array([[1, 2], [3, 4]])
>>> m = np.asmatrix(x)
>>> x[0,0] = 5
>>> m
matrix([[5, 2],
[3, 4]])
"""
return matrix(data, dtype=dtype, copy=False)
def matrix_power(M, n):
"""
Raise a square matrix to the (integer) power `n`.
For positive integers `n`, the power is computed by repeated matrix
squarings and matrix multiplications. If ``n == 0``, the identity matrix
of the same shape as M is returned. If ``n < 0``, the inverse
is computed and then raised to the ``abs(n)``.
Parameters
----------
M : ndarray or matrix object
Matrix to be "powered." Must be square, i.e. ``M.shape == (m, m)``,
with `m` a positive integer.
n : int
The exponent can be any integer or long integer, positive,
negative, or zero.
Returns
-------
M**n : ndarray or matrix object
The return value is the same shape and type as `M`;
if the exponent is positive or zero then the type of the
elements is the same as those of `M`. If the exponent is
negative the elements are floating-point.
Raises
------
LinAlgError
If the matrix is not numerically invertible.
See Also
--------
matrix
Provides an equivalent function as the exponentiation operator
(``**``, not ``^``).
Examples
--------
>>> from numpy import linalg as LA
>>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
>>> LA.matrix_power(i, 3) # should = -i
array([[ 0, -1],
[ 1, 0]])
>>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix
matrix([[ 0, -1],
[ 1, 0]])
>>> LA.matrix_power(i, 0)
array([[1, 0],
[0, 1]])
>>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
array([[ 0., 1.],
[-1., 0.]])
Somewhat more sophisticated example
>>> q = np.zeros((4, 4))
>>> q[0:2, 0:2] = -i
>>> q[2:4, 2:4] = i
>>> q # one of the three quaternion units not equal to 1
array([[ 0., -1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., -1., 0.]])
>>> LA.matrix_power(q, 2) # = -np.eye(4)
array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 0., 0., -1., 0.],
[ 0., 0., 0., -1.]])
"""
M = asanyarray(M)
if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
raise ValueError("input must be a square array")
if not issubdtype(type(n), int):
raise TypeError("exponent must be an integer")
from numpy.linalg import inv
if n == 0:
M = M.copy()
M[:] = identity(M.shape[0])
return M
elif n < 0:
M = inv(M)
n *= -1
result = M
if n <= 3:
for _ in range(n - 1):
result = N.dot(result, M)
return result
# binary decomposition to reduce the number of Matrix
# multiplications for n > 3.
beta = binary_repr(n)
Z, q, t = M, 0, len(beta)
while beta[t - q - 1] == '0':
Z = N.dot(Z, Z)
q += 1
result = Z
for k in range(q + 1, t):
Z = N.dot(Z, Z)
if beta[t - k - 1] == '1':
result = N.dot(result, Z)
return result
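# Worked example (illustrative, not part of the original source): for n = 13,
# binary_repr(13) == '1101', so the loop above squares Z through M^2, M^4, M^8
# and multiplies the running result by each power whose bit is set, computing
# M^13 = M^1 * M^4 * M^8 with 5 matrix products instead of the 12 a naive
# repeated-multiplication loop would need.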
class matrix(N.ndarray):
"""
matrix(data, dtype=None, copy=True)
Returns a matrix from an array-like object, or from a string of data.
A matrix is a specialized 2-D array that retains its 2-D nature
through operations. It has certain special operators, such as ``*``
(matrix multiplication) and ``**`` (matrix power).
Parameters
----------
data : array_like or string
If `data` is a string, it is interpreted as a matrix with commas
or spaces separating columns, and semicolons separating rows.
dtype : data-type
Data-type of the output matrix.
copy : bool
If `data` is already an `ndarray`, then this flag determines
whether the data is copied (the default), or whether a view is
constructed.
See Also
--------
array
Examples
--------
>>> a = np.matrix('1 2; 3 4')
>>> print(a)
[[1 2]
[3 4]]
>>> np.matrix([[1, 2], [3, 4]])
matrix([[1, 2],
[3, 4]])
"""
__array_priority__ = 10.0
def __new__(subtype, data, dtype=None, copy=True):
if isinstance(data, matrix):
dtype2 = data.dtype
if (dtype is None):
dtype = dtype2
if (dtype2 == dtype) and (not copy):
return data
return data.astype(dtype)
if isinstance(data, N.ndarray):
if dtype is None:
intype = data.dtype
else:
intype = N.dtype(dtype)
new = data.view(subtype)
if intype != data.dtype:
return new.astype(intype)
if copy:
return new.copy()
else:
return new
if isinstance(data, str):
data = _convert_from_string(data)
# now convert data to an array
arr = N.array(data, dtype=dtype, copy=copy)
ndim = arr.ndim
shape = arr.shape
if (ndim > 2):
raise ValueError("matrix must be 2-dimensional")
elif ndim == 0:
shape = (1, 1)
elif ndim == 1:
shape = (1, shape[0])
order = 'C'
if (ndim == 2) and arr.flags.fortran:
order = 'F'
if not (order or arr.flags.contiguous):
arr = arr.copy()
ret = N.ndarray.__new__(subtype, shape, arr.dtype,
buffer=arr,
order=order)
return ret
def __array_finalize__(self, obj):
self._getitem = False
if (isinstance(obj, matrix) and obj._getitem): return
ndim = self.ndim
if (ndim == 2):
return
if (ndim > 2):
newshape = tuple([x for x in self.shape if x > 1])
ndim = len(newshape)
if ndim == 2:
self.shape = newshape
return
elif (ndim > 2):
raise ValueError("shape too large to be a matrix.")
else:
newshape = self.shape
if ndim == 0:
self.shape = (1, 1)
elif ndim == 1:
self.shape = (1, newshape[0])
return
def __getitem__(self, index):
self._getitem = True
try:
out = N.ndarray.__getitem__(self, index)
finally:
self._getitem = False
if not isinstance(out, N.ndarray):
return out
if out.ndim == 0:
return out[()]
if out.ndim == 1:
sh = out.shape[0]
# Determine when we should have a column array
try:
n = len(index)
except Exception:
n = 0
if n > 1 and isscalar(index[1]):
out.shape = (sh, 1)
else:
out.shape = (1, sh)
return out
def __mul__(self, other):
if isinstance(other, (N.ndarray, list, tuple)):
# This promotes 1-D vectors to row vectors
return N.dot(self, asmatrix(other))
if isscalar(other) or not hasattr(other, '__rmul__'):
return N.dot(self, other)
return NotImplemented
def __rmul__(self, other):
return N.dot(other, self)
def __imul__(self, other):
self[:] = self * other
return self
def __pow__(self, other):
return matrix_power(self, other)
def __ipow__(self, other):
self[:] = self ** other
return self
def __rpow__(self, other):
return NotImplemented
def __repr__(self):
s = repr(self.__array__()).replace('array', 'matrix')
# now, 'matrix' has 6 letters, and 'array' 5, so the columns don't
# line up anymore. We need to add a space.
l = s.splitlines()
for i in range(1, len(l)):
if l[i]:
l[i] = ' ' + l[i]
return '\n'.join(l)
def __str__(self):
return str(self.__array__())
def _align(self, axis):
"""A convenience function for operations that need to preserve axis
orientation.
"""
if axis is None:
return self[0, 0]
elif axis == 0:
return self
elif axis == 1:
return self.transpose()
else:
raise ValueError("unsupported axis")
def _collapse(self, axis):
"""A convenience function for operations that want to collapse
to a scalar like _align, but are using keepdims=True
"""
if axis is None:
return self[0, 0]
else:
return self
# Necessary because base-class tolist expects dimension
# reduction by x[0]
def tolist(self):
"""
Return the matrix as a (possibly nested) list.
See `ndarray.tolist` for full documentation.
See Also
--------
ndarray.tolist
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.tolist()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
"""
return self.__array__().tolist()
# To preserve orientation of result...
def sum(self, axis=None, dtype=None, out=None):
"""
Returns the sum of the matrix elements, along the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum
Notes
-----
This is the same as `ndarray.sum`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix([[1, 2], [4, 3]])
>>> x.sum()
10
>>> x.sum(axis=1)
matrix([[3],
[7]])
>>> x.sum(axis=1, dtype='float')
matrix([[ 3.],
[ 7.]])
>>> out = np.zeros((1, 2), dtype='float')
>>> x.sum(axis=1, dtype='float', out=out)
matrix([[ 3.],
[ 7.]])
"""
return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis)
# To update docstring from array to matrix...
def squeeze(self, axis=None):
"""
Return a possibly reshaped matrix.
Refer to `numpy.squeeze` for more documentation.
Parameters
----------
axis : None or int or tuple of ints, optional
Selects a subset of the single-dimensional entries in the shape.
If an axis is selected with shape entry greater than one,
an error is raised.
Returns
-------
squeezed : matrix
The matrix, but as a (1, N) matrix if it had shape (N, 1).
See Also
--------
numpy.squeeze : related function
Notes
-----
If `m` has a single column then that column is returned
as the single row of a matrix. Otherwise `m` is returned.
The returned matrix is always either `m` itself or a view into `m`.
Supplying an axis keyword argument will not affect the returned matrix
but it may cause an error to be raised.
Examples
--------
>>> c = np.matrix([[1], [2]])
>>> c
matrix([[1],
[2]])
>>> c.squeeze()
matrix([[1, 2]])
>>> r = c.T
>>> r
matrix([[1, 2]])
>>> r.squeeze()
matrix([[1, 2]])
>>> m = np.matrix([[1, 2], [3, 4]])
>>> m.squeeze()
matrix([[1, 2],
[3, 4]])
"""
return N.ndarray.squeeze(self, axis=axis)
# To update docstring from array to matrix...
def flatten(self, order='C'):
"""
Return a flattened copy of the matrix.
All `N` elements of the matrix are placed into a single row.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order. 'F' means to
flatten in column-major (Fortran-style) order. 'A' means to
flatten in column-major order if `m` is Fortran *contiguous* in
memory, row-major order otherwise. 'K' means to flatten `m` in
the order the elements occur in memory. The default is 'C'.
Returns
-------
y : matrix
A copy of the matrix, flattened to a `(1, N)` matrix where `N`
is the number of elements in the original matrix.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the matrix.
Examples
--------
>>> m = np.matrix([[1,2], [3,4]])
>>> m.flatten()
matrix([[1, 2, 3, 4]])
>>> m.flatten('F')
matrix([[1, 3, 2, 4]])
"""
return N.ndarray.flatten(self, order=order)
def mean(self, axis=None, dtype=None, out=None):
"""
Returns the average of the matrix elements along the given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean
Notes
-----
Same as `ndarray.mean` except that, where that returns an `ndarray`,
this returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.mean()
5.5
>>> x.mean(0)
matrix([[ 4., 5., 6., 7.]])
>>> x.mean(1)
matrix([[ 1.5],
[ 5.5],
[ 9.5]])
"""
return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis)
def std(self, axis=None, dtype=None, out=None, ddof=0):
"""
Return the standard deviation of the array elements along the given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std
Notes
-----
This is the same as `ndarray.std`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.std()
3.4520525295346629
>>> x.std(0)
matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]])
>>> x.std(1)
matrix([[ 1.11803399],
[ 1.11803399],
[ 1.11803399]])
"""
return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
def var(self, axis=None, dtype=None, out=None, ddof=0):
"""
Returns the variance of the matrix elements, along the given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var
Notes
-----
This is the same as `ndarray.var`, except that where an `ndarray` would
be returned, a `matrix` object is returned instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3, 4)))
>>> x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.var()
11.916666666666666
>>> x.var(0)
matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]])
>>> x.var(1)
matrix([[ 1.25],
[ 1.25],
[ 1.25]])
"""
return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
def prod(self, axis=None, dtype=None, out=None):
"""
Return the product of the array elements over the given axis.
Refer to `prod` for full documentation.
See Also
--------
prod, ndarray.prod
Notes
-----
Same as `ndarray.prod`, except, where that returns an `ndarray`, this
returns a `matrix` object instead.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.prod()
0
>>> x.prod(0)
matrix([[ 0, 45, 120, 231]])
>>> x.prod(1)
matrix([[ 0],
[ 840],
[7920]])
"""
return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis)
def any(self, axis=None, out=None):
"""
Test whether any array element along a given axis evaluates to True.
Refer to `numpy.any` for full documentation.
Parameters
----------
axis : int, optional
Axis along which logical OR is performed
out : ndarray, optional
Output to existing array instead of creating new one, must have
same shape as expected output
Returns
-------
any : bool, ndarray
Returns a single bool if `axis` is ``None``; otherwise,
returns `ndarray`
"""
return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis)
def all(self, axis=None, out=None):
"""
Test whether all matrix elements along a given axis evaluate to True.
Parameters
----------
See `numpy.all` for complete descriptions
See Also
--------
numpy.all
Notes
-----
This is the same as `ndarray.all`, but it returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> y = x[0]; y
matrix([[0, 1, 2, 3]])
>>> (x == y)
matrix([[ True, True, True, True],
[False, False, False, False],
[False, False, False, False]], dtype=bool)
>>> (x == y).all()
False
>>> (x == y).all(0)
matrix([[False, False, False, False]], dtype=bool)
>>> (x == y).all(1)
matrix([[ True],
[False],
[False]], dtype=bool)
"""
return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis)
def max(self, axis=None, out=None):
"""
Return the maximum value along an axis.
Parameters
----------
See `amax` for complete descriptions
See Also
--------
amax, ndarray.max
Notes
-----
This is the same as `ndarray.max`, but returns a `matrix` object
where `ndarray.max` would return an ndarray.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.max()
11
>>> x.max(0)
matrix([[ 8, 9, 10, 11]])
>>> x.max(1)
matrix([[ 3],
[ 7],
[11]])
"""
return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis)
def argmax(self, axis=None, out=None):
"""
Indexes of the maximum values along an axis.
Return the indexes of the first occurrences of the maximum values
along the specified axis. If axis is None, the index is for the
flattened matrix.
Parameters
----------
See `numpy.argmax` for complete descriptions
See Also
--------
numpy.argmax
Notes
-----
This is the same as `ndarray.argmax`, but returns a `matrix` object
where `ndarray.argmax` would return an `ndarray`.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.argmax()
11
>>> x.argmax(0)
matrix([[2, 2, 2, 2]])
>>> x.argmax(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.argmax(self, axis, out)._align(axis)
def min(self, axis=None, out=None):
"""
Return the minimum value along an axis.
Parameters
----------
See `amin` for complete descriptions.
See Also
--------
amin, ndarray.min
Notes
-----
This is the same as `ndarray.min`, but returns a `matrix` object
where `ndarray.min` would return an ndarray.
Examples
--------
>>> x = -np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, -1, -2, -3],
[ -4, -5, -6, -7],
[ -8, -9, -10, -11]])
>>> x.min()
-11
>>> x.min(0)
matrix([[ -8, -9, -10, -11]])
>>> x.min(1)
matrix([[ -3],
[ -7],
[-11]])
"""
return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis)
def argmin(self, axis=None, out=None):
"""
Indexes of the minimum values along an axis.
Return the indexes of the first occurrences of the minimum values
along the specified axis. If axis is None, the index is for the
flattened matrix.
Parameters
----------
See `numpy.argmin` for complete descriptions.
See Also
--------
numpy.argmin
Notes
-----
This is the same as `ndarray.argmin`, but returns a `matrix` object
where `ndarray.argmin` would return an `ndarray`.
Examples
--------
>>> x = -np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, -1, -2, -3],
[ -4, -5, -6, -7],
[ -8, -9, -10, -11]])
>>> x.argmin()
11
>>> x.argmin(0)
matrix([[2, 2, 2, 2]])
>>> x.argmin(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.argmin(self, axis, out)._align(axis)
def ptp(self, axis=None, out=None):
"""
Peak-to-peak (maximum - minimum) value along the given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp
Notes
-----
Same as `ndarray.ptp`, except, where that would return an `ndarray` object,
this returns a `matrix` object.
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.ptp()
11
>>> x.ptp(0)
matrix([[8, 8, 8, 8]])
>>> x.ptp(1)
matrix([[3],
[3],
[3]])
"""
return N.ndarray.ptp(self, axis, out)._align(axis)
def getI(self):
"""
Returns the (multiplicative) inverse of invertible `self`.
Parameters
----------
None
Returns
-------
ret : matrix object
If `self` is non-singular, `ret` is such that ``ret * self`` ==
``self * ret`` == ``np.matrix(np.eye(self[0,:].size))`` all return
``True``.
Raises
------
numpy.linalg.LinAlgError: Singular matrix
If `self` is singular.
See Also
--------
linalg.inv
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]'); m
matrix([[1, 2],
[3, 4]])
>>> m.getI()
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
>>> m.getI() * m
matrix([[ 1., 0.],
[ 0., 1.]])
"""
M, N = self.shape
if M == N:
from numpy.dual import inv as func
else:
from numpy.dual import pinv as func
return asmatrix(func(self))
def getA(self):
"""
Return `self` as an `ndarray` object.
Equivalent to ``np.asarray(self)``.
Parameters
----------
None
Returns
-------
ret : ndarray
`self` as an `ndarray`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.getA()
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
"""
return self.__array__()
def getA1(self):
"""
Return `self` as a flattened `ndarray`.
Equivalent to ``np.asarray(x).ravel()``
Parameters
----------
None
Returns
-------
ret : ndarray
`self`, 1-D, as an `ndarray`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4))); x
matrix([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.getA1()
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
return self.__array__().ravel()
def ravel(self, order='C'):
"""
Return a flattened matrix.
Refer to `numpy.ravel` for more documentation.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
The elements of `m` are read using this index order. 'C' means to
index the elements in C-like order, with the last axis index
changing fastest, back to the first axis index changing slowest.
'F' means to index the elements in Fortran-like index order, with
the first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of the
memory layout of the underlying array, and only refer to the order
of axis indexing. 'A' means to read the elements in Fortran-like
index order if `m` is Fortran *contiguous* in memory, C-like order
otherwise. 'K' means to read the elements in the order they occur
in memory, except for reversing the data when strides are negative.
By default, 'C' index order is used.
Returns
-------
ret : matrix
Return the matrix flattened to shape `(1, N)` where `N`
is the number of elements in the original matrix.
A copy is made only if necessary.
See Also
--------
matrix.flatten : returns a similar output matrix but always a copy
matrix.flat : a flat iterator on the array.
numpy.ravel : related function which returns an ndarray
"""
return N.ndarray.ravel(self, order=order)
def getT(self):
"""
Returns the transpose of the matrix.
Does *not* conjugate! For the complex conjugate transpose, use ``.H``.
Parameters
----------
None
Returns
-------
ret : matrix object
The (non-conjugated) transpose of the matrix.
See Also
--------
transpose, getH
Examples
--------
>>> m = np.matrix('[1, 2; 3, 4]')
>>> m
matrix([[1, 2],
[3, 4]])
>>> m.getT()
matrix([[1, 3],
[2, 4]])
"""
return self.transpose()
def getH(self):
"""
Returns the (complex) conjugate transpose of `self`.
Equivalent to ``np.transpose(self)`` if `self` is real-valued.
Parameters
----------
None
Returns
-------
ret : matrix object
complex conjugate transpose of `self`
Examples
--------
>>> x = np.matrix(np.arange(12).reshape((3,4)))
>>> z = x - 1j*x; z
matrix([[ 0. +0.j, 1. -1.j, 2. -2.j, 3. -3.j],
[ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j],
[ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]])
>>> z.getH()
matrix([[ 0. +0.j, 4. +4.j, 8. +8.j],
[ 1. +1.j, 5. +5.j, 9. +9.j],
[ 2. +2.j, 6. +6.j, 10.+10.j],
[ 3. +3.j, 7. +7.j, 11.+11.j]])
"""
if issubclass(self.dtype.type, N.complexfloating):
return self.transpose().conjugate()
else:
return self.transpose()
T = property(getT, None)
A = property(getA, None)
A1 = property(getA1, None)
H = property(getH, None)
I = property(getI, None)
def _from_string(str, gdict, ldict):
rows = str.split(';')
rowtup = []
for row in rows:
trow = row.split(',')
newrow = []
for x in trow:
newrow.extend(x.split())
trow = newrow
coltup = []
for col in trow:
col = col.strip()
try:
thismat = ldict[col]
except KeyError:
try:
thismat = gdict[col]
except KeyError:
raise KeyError("%s not found" % (col,))
coltup.append(thismat)
rowtup.append(concatenate(coltup, axis=-1))
return concatenate(rowtup, axis=0)
def bmat(obj, ldict=None, gdict=None):
"""
Build a matrix object from a string, nested sequence, or array.
Parameters
----------
obj : str or array_like
Input data. Names of variables in the current scope may be
referenced, even if `obj` is a string.
ldict : dict, optional
A dictionary that replaces local operands in current frame.
Ignored if `obj` is not a string or `gdict` is `None`.
gdict : dict, optional
A dictionary that replaces global operands in current frame.
Ignored if `obj` is not a string.
Returns
-------
out : matrix
Returns a matrix object, which is a specialized 2-D array.
See Also
--------
matrix
Examples
--------
>>> A = np.mat('1 1; 1 1')
>>> B = np.mat('2 2; 2 2')
>>> C = np.mat('3 4; 5 6')
>>> D = np.mat('7 8; 9 0')
All the following expressions construct the same block matrix:
>>> np.bmat([[A, B], [C, D]])
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
>>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]])
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
>>> np.bmat('A,B; C,D')
matrix([[1, 1, 2, 2],
[1, 1, 2, 2],
[3, 4, 7, 8],
[5, 6, 9, 0]])
"""
if isinstance(obj, str):
if gdict is None:
# get previous frame
frame = sys._getframe().f_back
glob_dict = frame.f_globals
loc_dict = frame.f_locals
else:
glob_dict = gdict
loc_dict = ldict
return matrix(_from_string(obj, glob_dict, loc_dict))
if isinstance(obj, (tuple, list)):
# [[A,B],[C,D]]
arr_rows = []
for row in obj:
if isinstance(row, N.ndarray): # not 2-d
return matrix(concatenate(obj, axis=-1))
else:
arr_rows.append(concatenate(row, axis=-1))
return matrix(concatenate(arr_rows, axis=0))
if isinstance(obj, N.ndarray):
return matrix(obj)
mat = asmatrix
|
mit
| -4,329,534,605,734,874,600
| 26.38769
| 89
| 0.471222
| false
| 3.861377
| false
| false
| false
|
Septima/qgis-qlrbrowser
|
src/QlrBrowser/mysettings/qgissettingmanager/types/bool.py
|
1
|
3112
|
#-----------------------------------------------------------
#
# QGIS setting manager is a python module to easily manage read/write
# settings and set/get corresponding widgets.
#
# Copyright : (C) 2013 Denis Rouzaud
# Email : denis.rouzaud@gmail.com
#
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
from PyQt5.QtWidgets import QCheckBox
from qgis.core import QgsProject
from ..setting import Setting
from ..setting_widget import SettingWidget
from ..setting_manager import Debug
class Bool(Setting):
def __init__(self, name, scope, default_value, options={}):
Setting.__init__(self, name, scope, default_value, bool, QgsProject.instance().readBoolEntry, QgsProject.instance().writeEntryBool, options)
def check(self, value):
if type(value) != bool:
raise NameError("Setting %s must be a boolean." % self.name)
def config_widget(self, widget):
if type(widget) == QCheckBox:
return CheckBoxBoolWidget(self, widget, self.options)
elif hasattr(widget, "isCheckable") and widget.isCheckable():
return CheckableBoolWidget(self, widget, self.options)
else:
print(type(widget))
raise NameError("SettingManager does not handle %s widgets for booleans at the moment (setting: %s)" %
(type(widget), self.name))
class CheckBoxBoolWidget(SettingWidget):
def __init__(self, setting, widget, options):
signal = widget.stateChanged
SettingWidget.__init__(self, setting, widget, options, signal)
def set_widget_value(self, value):
if Debug:
print("Bool: set_widget_value: {0}{1}".format(value, self.setting.name))
self.widget.setChecked(value)
def widget_value(self):
return self.widget.isChecked()
class CheckableBoolWidget(SettingWidget):
def __init__(self, setting, widget, options):
signal = widget.clicked
SettingWidget.__init__(self, setting, widget, options, signal)
def set_widget_value(self, value):
self.widget.setChecked(value)
def widget_value(self):
return self.widget.isChecked()
def widget_test(self, value):
print('cannot test checkable groupbox at the moment')
return False
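# Illustrative usage sketch (hypothetical names; the SettingManager/Scope
# import paths are assumptions about this package's layout, not verified):
#
#     from ..setting_manager import SettingManager
#     from ..setting import Scope
#
#     class MySettings(SettingManager):
#         def __init__(self):
#             SettingManager.__init__(self, 'my_plugin')
#             self.add_setting(Bool('showPanel', Scope.Global, True))
#
# A checkable widget registered under 'showPanel' in a plugin dialog would
# then be driven through CheckBoxBoolWidget or CheckableBoolWidget above.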
|
gpl-2.0
| 4,159,583,462,942,297,000
| 36.506024
| 148
| 0.643959
| false
| 4.316227
| false
| false
| false
|
vhernandez/pygtksheet
|
examples/complex_test.py
|
1
|
11754
|
import sys
sys.path += ['/usr/local/lib/python2.6/dist-packages/gtk-2.0']
import gtk
from gtk import gdk
import pango
import gtksheet
from bordercombo import BorderCombo
#from gtkextra import BorderCombo
#import gtkextra
class TestSheet(gtksheet.Sheet):
def __init__(self):
gtksheet.Sheet.__init__(self, 20, 20, "Test")
colormap = gdk.colormap_get_system()
self.default_bg_color = colormap.alloc_color("light yellow")
self.default_fg_color = colormap.alloc_color("black")
self.set_background(self.default_bg_color)
self.set_grid(colormap.alloc_color("light blue"))
for column in xrange(self.get_columns_count()):
name = chr(ord("A") + column)
self.column_button_add_label(column, name)
self.set_column_title(column, name)
self.default_font = self.style.font_desc
class TestWindow(gtk.Window):
def __init__(self):
gtk.Window.__init__(self)
status_box = gtk.HBox(spacing=1)
status_box.set_border_width(0)
self.location = gtk.Label("")
(width, height) = self.location.size_request()
self.location.set_size_request(160, height)
status_box.pack_start(self.location, False)
self.entry = gtk.Entry()
self.entry.connect("changed", self._show_sheet_entry_cb)
status_box.pack_start(self.entry)
t = gtk.Toolbar()
ttips = gtk.Tooltips()
def add_widget_to_toolbar(widget, separator=True, tooltip=None):
ti = gtk.ToolItem()
ti.add(widget)
if tooltip is not None:
ti.set_tooltip(ttips, tooltip)
t.insert(ti, -1)
if separator:
t.insert(gtk.SeparatorToolItem(), -1)
fontbutton = gtk.FontButton()
fontbutton.connect("font-set", self._font_changed_cb)
add_widget_to_toolbar(fontbutton,
tooltip="Change the font of the selected cells");
self.fontbutton = fontbutton
items = \
(("justleft", None,
"Justify selected cells to the left",
gtk.STOCK_JUSTIFY_LEFT, self._justification_cb,
gtk.JUSTIFY_LEFT),
("justcenter", None,
"Justify selected cells to the center",
gtk.STOCK_JUSTIFY_CENTER, self._justification_cb,
gtk.JUSTIFY_CENTER),
("justright", None,
"Justify selected cells to the right",
gtk.STOCK_JUSTIFY_RIGHT, self._justification_cb,
gtk.JUSTIFY_RIGHT))
for name, label, tooltip, stock_id, cb, cb_params in items:
ti = gtk.Action(name, label, tooltip, stock_id)
ti.connect("activate", cb, cb_params)
t.insert(ti.create_tool_item(), -1)
bordercombo = BorderCombo()
bordercombo.connect("changed", self._border_changed_cb)
add_widget_to_toolbar(bordercombo,
tooltip="Change the border of the selected cells")
colormap = gdk.colormap_get_system()
colorbtn = gtk.ColorButton(colormap.alloc_color("black"))
colorbtn.connect("color-set", self._color_changed_cb, "f")
add_widget_to_toolbar(colorbtn, separator=False,
tooltip="Change the foreground color of the selected cells")
self.fgcolorbtn = colorbtn
colorbtn = gtk.ColorButton(colormap.alloc_color("light yellow"))
colorbtn.connect("color-set", self._color_changed_cb, "b")
add_widget_to_toolbar(colorbtn,
tooltip="Change the background color of the selected cells");
self.bgcolorbtn = colorbtn
self.sheet = TestSheet()
self.sheet.connect("activate", self._activate_sheet_cell_cb)
self.sheet.get_entry().connect("changed", self._show_entry_cb)
self.sheet.connect("changed", self._sheet_changed_cb)
ws = gtk.ScrolledWindow()
ws.add(self.sheet)
fd = self.sheet.default_font
fontbutton.set_font_name(fd.to_string())
vbox = gtk.VBox()
vbox.pack_start(t, False, False, 0)
vbox.pack_start(status_box, False, False, 0)
vbox.pack_start(ws, True, True, 0)
self.add(vbox)
self.set_size_request(500,400)
self.show_all()
def _sheet_changed_cb(self, sheet, row, column):
print "Sheet change at row: %d, column: %d" % (row, column)
def _show_sheet_entry_cb(self, entry):
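        # only forward text while the user is typing in this entry, so the two
        # mirrored entries do not feed back into each other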
if not entry.flags() & gtk.HAS_FOCUS:
return
sheet_entry = self.sheet.get_entry()
text = entry.get_text()
sheet_entry.set_text(text)
def _show_entry_cb(self, sheet_entry, *args):
if not sheet_entry.flags() & gtk.HAS_FOCUS:
return
text = sheet_entry.get_text()
self.entry.set_text(text)
def _activate_sheet_cell_cb(self, sheet, row, column):
title = sheet.get_column_title(column)
if title:
cell = " %s:%d " % (title, row)
else:
cell = " ROW: %d COLUMN: %d " % (row, column)
self.location.set_text(cell)
# Set attributes
attributes = sheet.get_attributes(row, column)
if attributes:
fd = attributes.font_desc if attributes.font_desc else self.sheet.default_font
fgcolor = attributes.foreground
bgcolor = attributes.background
else:
fd = self.sheet.default_font
fgcolor = self.sheet.default_fg_color
bgcolor = self.sheet.default_bg_color
self.fontbutton.set_font_name(fd.to_string())
self.fgcolorbtn.set_color(fgcolor)
self.bgcolorbtn.set_color(bgcolor)
# Set entry text
sheet_entry = sheet.get_entry()
self.entry.props.max_length = sheet_entry.props.max_length
text = sheet.cell_get_text(row, column)
if text:
self.entry.set_text(text)
else:
self.entry.set_text("")
print self.sheet.props.active_cell
def _font_changed_cb(self, widget):
r = self.sheet.props.selected_range
fd = pango.FontDescription(widget.get_font_name())
self.sheet.range_set_font(r, fd)
def _justification_cb(self, widget, data=None):
if data is None:
return
r = self.sheet.props.selected_range
if r:
self.sheet.range_set_justification(r, data)
def _border_changed_cb(self, widget):
border = widget.get_active()
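        # the combo's active index selects the style: 1-4 single outer edges
        # (top/bottom/right/left), 5/6 opposite-edge pairs, 7/8 per-cell
        # vertical/horizontal rules, 9 inner grid, 10 outer frame, 11 full grid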
range = self.sheet.props.selected_range
border_width = 3
self.sheet.range_set_border(range, 0, 0)
if border == 1:
border_mask = gtksheet.SHEET_TOP_BORDER
range.rowi = range.row0
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 2:
border_mask = gtksheet.SHEET_BOTTOM_BORDER
range.row0 = range.rowi
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 3:
border_mask = gtksheet.SHEET_RIGHT_BORDER
range.col0 = range.coli
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 4:
border_mask = gtksheet.SHEET_LEFT_BORDER
range.coli = range.col0
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 5:
if range.col0 == range.coli:
border_mask = gtksheet.SHEET_LEFT_BORDER | gtksheet.SHEET_RIGHT_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
else:
border_mask = gtksheet.SHEET_LEFT_BORDER
auxcol = range.coli
range.coli = range.col0
self.sheet.range_set_border(range, border_mask, border_width)
border_mask = gtksheet.SHEET_RIGHT_BORDER
range.col0 = range.coli = auxcol
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 6:
if range.row0 == range.rowi:
border_mask = gtksheet.SHEET_TOP_BORDER | gtksheet.SHEET_BOTTOM_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
else:
border_mask = gtksheet.SHEET_TOP_BORDER
auxrow = range.rowi
range.rowi = range.row0
self.sheet.range_set_border(range, border_mask, border_width)
border_mask = gtksheet.SHEET_BOTTOM_BORDER
range.row0 = range.rowi = auxrow
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 7:
border_mask = gtksheet.SHEET_RIGHT_BORDER | gtksheet.SHEET_LEFT_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 8:
border_mask = gtksheet.SHEET_BOTTOM_BORDER | gtksheet.SHEET_TOP_BORDER
self.sheet.range_set_border(range, border_mask, border_width)
elif border == 9:
self.sheet.range_set_border(range, 15, border_width)
for i in xrange(range.row0, range.rowi + 1):
for j in xrange(range.col0, range.coli + 1):
border_mask = 15
                    auxrange = gtksheet.SheetRange(i, j, i, j)
if i == range.rowi:
border_mask = border_mask ^ gtksheet.SHEET_BOTTOM_BORDER
if i == range.row0:
border_mask = border_mask ^ gtksheet.SHEET_TOP_BORDER
if j == range.coli:
border_mask = border_mask ^ gtksheet.SHEET_RIGHT_BORDER
if j == range.col0:
border_mask = border_mask ^ gtksheet.SHEET_LEFT_BORDER
if border_mask != 15:
self.sheet.range_set_border(auxrange, border_mask,
border_width)
elif border == 10:
for i in xrange(range.row0, range.rowi + 1):
for j in xrange(range.col0, range.coli + 1):
border_mask = 0
auxrange = gtksheet.SheetRange(i, j, i, j)
if i == range.rowi:
border_mask = border_mask | gtksheet.SHEET_BOTTOM_BORDER
if i == range.row0:
border_mask = border_mask | gtksheet.SHEET_TOP_BORDER
if j == range.coli:
border_mask = border_mask | gtksheet.SHEET_RIGHT_BORDER
if j == range.col0:
border_mask = border_mask | gtksheet.SHEET_LEFT_BORDER
if border_mask != 0:
self.sheet.range_set_border(auxrange, border_mask,
border_width)
elif border == 11:
border_mask = 15
self.sheet.range_set_border(range, border_mask, border_width)
def _color_changed_cb(self, widget, data=None):
# Bug in GtkSheet?: the color must be allocated with the system's
# colormap, else it is ignored
if data is None:
return
color = widget.get_color()
_range = self.sheet.props.selected_range
if data == "f":
self.sheet.range_set_foreground(_range, color)
else:
self.sheet.range_set_background(_range, color)
def main():
w = TestWindow()
w.connect("delete-event", lambda x,y: gtk.main_quit())
gtk.main()
if __name__=='__main__':
main()
|
gpl-2.0
| 9,136,060,946,695,285,000
| 39.253425
| 90
| 0.561766
| false
| 3.80758
| false
| false
| false
|
nash-x/hws
|
nova/huawei/scheduler/filters/disk_filter.py
|
1
|
2145
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler.filters import disk_filter
from nova.huawei import utils as h_utils
LOG = logging.getLogger(__name__)
class HuaweiDiskFilter(disk_filter.DiskFilter):
"""Disk Filter with over subscription flag."""
def host_passes(self, host_state, filter_properties):
"""Filter based on disk usage."""
#deep copy a filter properties to avoid changing
filter_properties_tmp = copy.deepcopy(filter_properties)
context = filter_properties_tmp['context']
instance = filter_properties_tmp['request_spec']['instance_properties']
if h_utils.is_boot_from_volume(context, instance):
# just process local disk(ephemeral and swap), so set
# root_gb to zero
filter_properties_tmp.get('instance_type')['root_gb'] = 0
        # If the requested local disk size is zero, pass the filter:
        # when a host reports negative free disk, an instance booted from
        # a volume would otherwise be rejected and never created.
instance_type = filter_properties.get('instance_type')
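        # flavor ephemeral_gb is in GB and swap is already in MB, hence the 1024 factor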
requested_disk = (1024 * (instance_type['ephemeral_gb']) +
instance_type['swap'])
if requested_disk == 0:
return True
return super(HuaweiDiskFilter, self).host_passes(host_state,
filter_properties_tmp)
|
apache-2.0
| -566,471,726,499,216,700
| 39.471698
| 79
| 0.660606
| false
| 4.41358
| false
| false
| false
|
pglomski/shopnotes
|
drill_speed_chart.py
|
1
|
2778
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''Produce a custom twist drill plot'''
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
# set some rcParams
mpl.rcParams['font.weight'] = 'bold'
mpl.rcParams['xtick.major.pad'] = 10
mpl.rcParams['xtick.direction'] = 'inout'
mpl.rcParams['xtick.labelsize'] = 26
mpl.rcParams['ytick.direction'] = 'inout'
mpl.rcParams['ytick.labelsize'] = 20
# define the constants for our chart
materials = [
('Acrylic' , 650 , 'c' , '-' ) ,
('Aluminum' , 300 , 'b' , '-' ) ,
('Brass' , 200 , 'g' , '-' ) ,
('LC Steel' , 110 , 'k' , '-' ) ,
('Wood' , 100 , 'brown' , '-' ) ,
('MC Steel' , 80 , 'darkgray' , '-' ) ,
('HC Steel' , 60 , 'lightgray' , '-' ) ,
('Stainless' , 50 , 'purple' , '-' ) ,
]
drill_speeds = [250, 340, 390, 510, 600, 650, 990, 1550, 1620, 1900, 2620, 3100] #rpm
speed_lims = (200., 4000.) # rpm
max_in = 1. # in.
incr = 1./16. # in.
im_sz = 25. # in.
ratio = 8.5/11.
fig = plt.figure(figsize=(im_sz,ratio * im_sz), dpi=600)
fig.patch.set_alpha(0)
# generate a vector of drill bit diameter
x = np.array([float(i) * incr for i in range(1,int(max_in/incr) + 1)]) # in.
# calculate the drill speed curve for each material type and plot the curve
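# rpm = 12 * surface speed / (pi * diameter); the material speeds above are
# surface feet per minute and bit diameters are in inches, hence 12/np.pi/x*speed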
for name, speed, color, linestyle in materials:
plt.loglog(x, 12/np.pi/x*speed, label=name, linewidth=5, color=color, linestyle=linestyle)
ax = plt.gca()
# adjust the axis tick locators to match drill press speeds
ax.yaxis.set_major_locator(mpl.ticker.FixedLocator(drill_speeds))
ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%4d'))
ax.yaxis.set_minor_locator(mpl.ticker.NullLocator())
ax.set_ylim(speed_lims)
# set the drill diameter locators and format the ticks with LaTeX
ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(base=incr))
ax.xaxis.set_minor_locator(mpl.ticker.NullLocator())
ax.set_xlim((incr, max_in))
ticks = ['0', r'$$\frac{1}{16}$$' , r'$$\frac{1}{8}$$' , r'$$\frac{3}{16}$$' , r'$$\frac{1}{4}$$' ,
r'$$\frac{5}{16}$$' , r'$$\frac{3}{8}$$' , r'$$\frac{7}{16}$$' , r'$$\frac{1}{2}$$' ,
r'$$\frac{9}{16}$$' , r'$$\frac{5}{8}$$' , r'$$\frac{11}{16}$$' , r'$$\frac{3}{4}$$' ,
r'$$\frac{13}{16}$$' , r'$$\frac{7}{8}$$' , r'$$\frac{15}{16}$$' , r'$$1$$' ]
ax.xaxis.set_ticklabels(ticks)
# Add the Texts
plt.xlabel('Bit Diameter (in.)', fontsize=26)
plt.ylabel('Drill Speed (rpm)' , fontsize=26)
plt.title('Twist Drill Speeds' , fontsize=50)
plt.legend(ncol=2, loc=3, fontsize=40)
plt.grid('on')
plt.savefig('drill_speed_chart.png')
|
agpl-3.0
| 6,025,454,394,184,277,000
| 35.077922
| 102
| 0.569114
| false
| 2.603561
| false
| false
| false
|
crmorse/weewx-waterflow
|
bin/weedb/mysql.py
|
1
|
9153
|
#
# Copyright (c) 2012 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
# $Revision$
# $Author$
# $Date$
#
"""Driver for the MySQL database"""
import decimal
import MySQLdb
import _mysql_exceptions
from weeutil.weeutil import to_bool
import weedb
def connect(host='localhost', user='', password='', database='', driver='', **kwargs):
"""Connect to the specified database"""
return Connection(host=host, user=user, password=password, database=database, **kwargs)
def create(host='localhost', user='', password='', database='', driver='', **kwargs):
"""Create the specified database. If it already exists,
an exception of type weedb.DatabaseExists will be thrown."""
# Open up a connection w/o specifying the database.
try:
connect = MySQLdb.connect(host = host,
user = user,
passwd = password, **kwargs)
cursor = connect.cursor()
# An exception will get thrown if the database already exists.
try:
# Now create the database.
cursor.execute("CREATE DATABASE %s" % (database,))
except _mysql_exceptions.ProgrammingError:
# The database already exists. Change the type of exception.
raise weedb.DatabaseExists("Database %s already exists" % (database,))
finally:
cursor.close()
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
def drop(host='localhost', user='', password='', database='', driver='', **kwargs):
"""Drop (delete) the specified database."""
# Open up a connection
try:
connect = MySQLdb.connect(host = host,
user = user,
passwd = password, **kwargs)
cursor = connect.cursor()
try:
cursor.execute("DROP DATABASE %s" % database)
except _mysql_exceptions.OperationalError:
raise weedb.NoDatabase("""Attempt to drop non-existent database %s""" % (database,))
finally:
cursor.close()
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
class Connection(weedb.Connection):
"""A wrapper around a MySQL connection object."""
def __init__(self, host='localhost', user='', password='', database='', **kwargs):
"""Initialize an instance of Connection.
Parameters:
host: IP or hostname with the mysql database (required)
user: User name (required)
password: The password for the username (required)
database: The database to be used. (required)
kwargs: Any extra arguments you may wish to pass on to MySQL (optional)
If the operation fails, an exception of type weedb.OperationalError will be raised.
"""
try:
connection = MySQLdb.connect(host=host, user=user, passwd=password, db=database, **kwargs)
except _mysql_exceptions.OperationalError, e:
# The MySQL driver does not include the database in the
# exception information. Tack it on, in case it might be useful.
raise weedb.OperationalError(str(e) + " while opening database '%s'" % (database,))
weedb.Connection.__init__(self, connection, database, 'mysql')
# Allowing threads other than the main thread to see any transactions
# seems to require an isolation level of READ UNCOMMITTED.
self.query("SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED")
def cursor(self):
"""Return a cursor object."""
# The implementation of the MySQLdb cursor is lame enough that we are
# obliged to include a wrapper around it:
return Cursor(self)
def tables(self):
"""Returns a list of tables in the database."""
table_list = list()
try:
# Get a cursor directly from MySQL
cursor = self.connection.cursor()
cursor.execute("""SHOW TABLES;""")
while True:
row = cursor.fetchone()
if row is None: break
# Extract the table name. In case it's in unicode, convert to a regular string.
table_list.append(str(row[0]))
finally:
cursor.close()
return table_list
def genSchemaOf(self, table):
"""Return a summary of the schema of the specified table.
If the table does not exist, an exception of type weedb.OperationalError is raised."""
try:
# Get a cursor directly from MySQL:
cursor = self.connection.cursor()
# MySQL throws an exception if you try to show the columns of a
# non-existing table
try:
cursor.execute("""SHOW COLUMNS IN %s;""" % table)
except _mysql_exceptions.ProgrammingError, e:
# Table does not exist. Change the exception type:
raise weedb.OperationalError(e)
irow = 0
while True:
row = cursor.fetchone()
if row is None: break
# Append this column to the list of columns.
colname = str(row[0])
if row[1].upper()=='DOUBLE':
coltype = 'REAL'
elif row[1].upper().startswith('INT'):
coltype = 'INTEGER'
elif row[1].upper().startswith('CHAR'):
coltype = 'STR'
else:
coltype = str(row[1]).upper()
is_primary = True if row[3] == 'PRI' else False
yield (irow, colname, coltype, to_bool(row[2]), row[4], is_primary)
irow += 1
finally:
cursor.close()
def columnsOf(self, table):
"""Return a list of columns in the specified table.
If the table does not exist, an exception of type weedb.OperationalError is raised."""
column_list = [row[1] for row in self.genSchemaOf(table)]
return column_list
def begin(self):
"""Begin a transaction."""
self.query("START TRANSACTION")
def commit(self):
try:
weedb.Connection.commit(self)
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
def rollback(self):
try:
weedb.Connection.rollback(self)
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
def query(self, *args, **kwargs):
try:
self.connection.query(*args, **kwargs)
except _mysql_exceptions.OperationalError, e:
raise weedb.OperationalError(e)
class Cursor(object):
"""A wrapper around the MySQLdb cursor object"""
def __init__(self, connection):
"""Initialize a Cursor from a connection.
connection: An instance of db.mysql.Connection"""
# Get the MySQLdb cursor and store it internally:
self.cursor = connection.connection.cursor()
def execute(self, sql_string, sql_tuple=() ):
"""Execute a SQL statement on the MySQL server.
sql_string: A SQL statement to be executed. It should use ? as
a placeholder.
sql_tuple: A tuple with the values to be used in the placeholders."""
# MySQL uses '%s' as placeholders, so replace the ?'s with %s
mysql_string = sql_string.replace('?','%s')
try:
# Convert sql_tuple to a plain old tuple, just in case it actually
# derives from tuple, but overrides the string conversion (as is the
# case with a TimeSpan object):
self.cursor.execute(mysql_string, tuple(sql_tuple))
except (_mysql_exceptions.OperationalError, _mysql_exceptions.ProgrammingError), e:
raise weedb.OperationalError(e)
return self
def fetchone(self):
# Get a result from the MySQL cursor, then run it through the massage
# filter below
return massage(self.cursor.fetchone())
def close(self):
try:
self.cursor.close()
del self.cursor
except:
pass
#
# Supplying functions __iter__ and next allows the cursor to be used as an iterator.
#
def __iter__(self):
return self
def next(self):
result = self.fetchone()
if result is None:
raise StopIteration
return result
#
# This is a utility function for converting a result set that might contain
# longs or decimal.Decimals (which MySQLdb uses) to something containing just ints.
#
def massage(seq):
# Return the massaged sequence if it exists, otherwise, return None
if seq is not None:
return [int(i) if isinstance(i, long) or isinstance(i,decimal.Decimal) else i for i in seq]
|
gpl-3.0
| 4,721,289,113,010,788,000
| 36.979253
| 102
| 0.579919
| false
| 4.629742
| false
| false
| false
|
papaloizouc/peacehack
|
peacehack/theapp/migrations/0001_initial.py
|
1
|
5492
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='CrazyObject',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ActionGeo_ADM1Code', models.CharField(max_length=10, null=True, blank=True)),
('ActionGeo_CountryCode', models.CharField(max_length=4, null=True, blank=True)),
('ActionGeo_FeatureID', models.CharField(max_length=4, null=True, blank=True)),
('ActionGeo_FullName', models.CharField(max_length=200, null=True, blank=True)),
('ActionGeo_Lat', models.CharField(max_length=4, null=True, blank=True)),
('ActionGeo_Long', models.TextField(null=True, blank=True)),
('ActionGeo_Type', models.TextField(null=True, blank=True)),
('Actor1Code', models.TextField(null=True, blank=True)),
('Actor1CountryCode', models.TextField(null=True, blank=True)),
('Actor1EthnicCode', models.TextField(null=True, blank=True)),
('Actor1Geo_ADM1Code', models.TextField(null=True, blank=True)),
('Actor1Geo_CountryCode', models.IntegerField(null=True, blank=True)),
('Actor1Geo_FeatureID', models.IntegerField(null=True, blank=True)),
('Actor1Geo_FullName', models.TextField(null=True, blank=True)),
('Actor1Geo_Lat', models.TextField(null=True, blank=True)),
('Actor1Geo_Long', models.TextField(null=True, blank=True)),
('Actor1Geo_Type', models.IntegerField(null=True, blank=True)),
('Actor1KnownGroupCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Name', models.TextField(null=True, blank=True)),
('Actor1Religion1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Religion2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Type1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Type2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor1Type3Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2CountryCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2EthnicCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Geo_ADM1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Geo_CountryCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Geo_FeatureID', models.IntegerField(null=True, blank=True)),
('Actor2Geo_FullName', models.TextField(null=True, blank=True)),
('Actor2Geo_Lat', models.TextField(null=True, blank=True)),
('Actor2Geo_Long', models.TextField(null=True, blank=True)),
('Actor2Geo_Type', models.IntegerField(null=True, blank=True)),
('Actor2KnownGroupCode', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Name', models.TextField(null=True, blank=True)),
('Actor2Religion1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Religion2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Type1Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Type2Code', models.CharField(max_length=4, null=True, blank=True)),
('Actor2Type3Code', models.CharField(max_length=4, null=True, blank=True)),
('AvgTone', models.TextField(null=True, blank=True)),
('DATEADDED', models.IntegerField(null=True, blank=True)),
('EventBaseCode', models.IntegerField(null=True, blank=True)),
('EventCode', models.IntegerField(null=True, blank=True)),
('EventRootCode', models.IntegerField(null=True, blank=True)),
('FractionDate', models.TextField(null=True, blank=True)),
('GLOBALEVENTID', models.IntegerField(null=True, blank=True)),
('GoldsteinScale', models.TextField(null=True, blank=True)),
('IsRootEvent', models.IntegerField(null=True, blank=True)),
('MonthYear', models.IntegerField(null=True, blank=True)),
('NumArticles', models.IntegerField(null=True, blank=True)),
('NumMentions', models.IntegerField(null=True, blank=True)),
('NumSources', models.IntegerField(null=True, blank=True)),
('QuadClass', models.IntegerField(null=True, blank=True)),
('SOURCEURL', models.TextField(null=True, blank=True)),
('SQLDATE', models.IntegerField(null=True, blank=True)),
('Year', models.IntegerField(null=True, blank=True)),
('Day', models.IntegerField(null=True, blank=True)),
('Month', models.IntegerField(null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
]
|
gpl-2.0
| 5,515,570,037,667,942,000
| 65.97561
| 114
| 0.599417
| false
| 3.922857
| false
| false
| false
|
ashishtilokani/Cloaking-Detection-Tool
|
googleBot/googleBot/spiders/scrape2.py
|
1
|
1236
|
from scrapy.selector import HtmlXPathSelector
from scrapy.spider import Spider
import html2text
import re
import os.path
class scrape(Spider):
name = "googleBot2"
start_urls = []
    with open('/home/ashish/Desktop/CloakingDetectionTool/url.txt','r') as url_file:
        for line in url_file:
            line = line.strip()  # drop the newline so the name matches the file written in parse()
            l = line.replace("/", "_")
            # queue only URLs without a cached crawl result
            if not os.path.isfile('/home/ashish/Desktop/CloakingDetectionTool/c2/' + l + '.txt'):
                start_urls.append(line)
def parse(self, response):
regex = re.compile('[^A-Za-z0-9_]')
        # regex.sub(replacement, string) below strips every character outside [A-Za-z0-9_]
d={}
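        # d records words already written so each unique term is stored only once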
l=(response.url).replace("/", "_")
f=open('/home/ashish/Desktop/CloakingDetectionTool/c2/'+ l + '.txt','w')
terms=[]
terms = (response.body).split()
c=0
for word in terms:
word=regex.sub('', word)
if word not in d:
d[word]=1
f.write(word)
f.write(' ')
c=1
if c==0: #empty
f.write(' ')
f.write('\n')
f.close()
|
mit
| -6,094,454,040,259,034,000
| 29.146341
| 88
| 0.486246
| false
| 3.92381
| false
| false
| false
|
mprinc/McMap
|
src/scripts/CSN_Archive/check_object_names.py
|
1
|
4677
|
#!/usr/bin/env python
# Copyright (c) 2015, Scott D. Peckham
#------------------------------------------------------
# S.D. Peckham
# July 9, 2015
#
# Tool to extract the object part of every CSDMS Standard
# Variable Name and generate a list of objects that
# includes those as well as all parent objects.
#
# Example of use at a Unix prompt:
#
# % ./check_object_names.py CSN_VarNames_v0.82.txt
#------------------------------------------------------
#
# Functions:
# check_objects()
#
#------------------------------------------------------
import os.path
import sys
#------------------------------------------------------
def check_objects( in_file='CSN_VarNames_v0.82.txt' ):
#--------------------------------------------------
# Open input file that contains copied names table
#--------------------------------------------------
try:
in_unit = open( in_file, 'r' )
except:
print 'SORRY: Could not open TXT file named:'
print ' ' + in_file
#-------------------------
# Open new CSV text file
#-------------------------
## pos = in_file.rfind('.')
## prefix = in_file[0:pos]
## out_file = prefix + '.ttl'
out_file = 'All_Object_Names.txt'
#-------------------------------------------
OUT_EXISTS = os.path.exists( out_file )
if (OUT_EXISTS):
print 'SORRY, A text file with the name'
print ' ' + out_file
print ' already exists.'
return
out_unit = open( out_file, 'w' )
#---------------------------
# Parse all variable names
#---------------------------
n_objects = 0
object_list1 = list()
object_list2 = list()
while (True):
#------------------------------
# Read data line from in_file
#------------------------------
line = in_unit.readline()
if (line == ''):
break
#--------------------------------------------------
# Write object and quantity fullnames to TTL file
#--------------------------------------------------
line = line.strip() # (strip leading/trailing white space)
main_parts = line.split('__')
object_fullname = main_parts[0]
# quantity_fullname = main_parts[1]
#------------------------------------
# Append object name to object_list
#------------------------------------
object_list1.append( object_fullname )
object_list2.append( object_fullname )
#------------------------------------------------
# Append all parent object names to object_list
#------------------------------------------------
object_name = object_fullname
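        # strip one trailing '_part' per pass to emit every ancestor object name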
while (True):
pos = object_name.rfind('_')
if (pos < 0):
break
object_name = object_name[:pos]
object_list2.append( object_name )
#---------------------------------------------
# Create sorted lists of unique object names
# Not fastest method, but simple.
#---------------------------------------------
old_list = sorted( set(object_list1) )
new_list = sorted( set(object_list2) )
n_objects1 = len( old_list )
n_objects2 = len( new_list )
#--------------------------------------------
# Write complete object list to output file
#--------------------------------------------
for k in xrange( n_objects2 ):
out_unit.write( new_list[k] + '\n' )
#----------------------
# Close the input file
#----------------------
in_unit.close()
#----------------------------
# Close the TXT output file
#----------------------------
out_unit.close()
print 'Finished checking all object names.'
print 'Number of old object names =', n_objects1, '.'
print 'Number of new object names =', n_objects2, '.'
print ' '
# check_objects()
#------------------------------------------------------
if (__name__ == "__main__"):
#-----------------------------------------------------
# Note: First arg in sys.argv is the command itself.
#-----------------------------------------------------
n_args = len(sys.argv)
if (n_args < 2):
print 'ERROR: This tool requires an input'
print ' text file argument.'
print 'sys.argv =', sys.argv
print ' '
elif (n_args == 2):
check_objects( sys.argv[1] )
else:
print 'ERROR: Invalid number of arguments.'
#-----------------------------------------------------------------------
|
mit
| -1,015,613,779,617,027,700
| 32.407143
| 94
| 0.383579
| false
| 4.753049
| false
| false
| false
|
PrFalken/exaproxy
|
lib/exaproxy/icap/response.py
|
1
|
2403
|
class ICAPResponse (object):
def __init__ (self, version, code, status, headers, icap_header, http_header):
self.version = version
self.code = code
self.status = status
self.headers = headers
icap_len = len(icap_header)
http_len = len(http_header)
icap_end = icap_len
if http_header:
http_len_string = '%x\n' % http_len
http_string = http_len_string + http_header + '0\n'
http_offset = icap_end + len(http_len_string)
http_end = http_offset + http_len
else:
http_string = http_header
http_offset = icap_end
http_end = icap_end
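        # a single buffer backs the reply: the ICAP header followed, when present,
        # by the HTTP header wrapped as one chunk; the views below are zero-copy slices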
self.response_view = memoryview(icap_header + http_string)
self.icap_view = self.response_view[:icap_end]
self.http_view = self.response_view[http_offset:http_end]
@property
def response_string (self):
return self.response_view.tobytes()
@property
def icap_header (self):
return self.icap_view.tobytes()
@property
def http_header (self):
return self.http_view.tobytes()
@property
def pragma (self):
return self.headers.get('pragma', {})
@property
def is_permit (self):
return False
@property
def is_modify (self):
return False
@property
def is_content (self):
return False
@property
def is_intercept (self):
return False
class ICAPRequestModification (ICAPResponse):
def __init__ (self, version, code, status, headers, icap_header, http_header, intercept_header=None):
ICAPResponse.__init__(self, version, code, status, headers, icap_header, http_header)
self.intercept_header = intercept_header
@property
def is_permit (self):
return self.code == 304
@property
def is_modify (self):
return self.code == 200 and self.intercept_header is None
@property
def is_intercept (self):
return self.code == 200 and self.intercept_header is not None
class ICAPResponseModification (ICAPResponse):
@property
def is_content (self):
return self.code == 200
class ICAPResponseFactory:
def __init__ (self, configuration):
self.configuration = configuration
def create (self, version, code, status, headers, icap_header, request_header, response_header, intercept_header=None):
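        # a response header means response modification (RESPMOD); otherwise this
        # is request modification (REQMOD), possibly with an intercept header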
if response_header:
response = ICAPResponseModification(version, code, status, headers, icap_header, response_header)
else:
response = ICAPRequestModification(version, code, status, headers, icap_header, request_header, intercept_header=intercept_header)
return response
|
bsd-2-clause
| -2,532,343,003,033,045,000
| 23.520408
| 133
| 0.714524
| false
| 3.128906
| false
| false
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.