| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
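The `prefix`, `middle`, and `suffix` columns appear to hold three contiguous pieces of a single source file (a fill-in-the-middle style split): in the first row below, the prefix ends with `models.CharField(m` and the middle continues with `ax_length=128)`. A minimal sketch of reassembling one record under that reading of the schema; the example row is invented for illustration, not taken from the table.

```python
def reassemble(row: dict) -> str:
    """Rebuild the full source text from one record's split columns."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Illustrative record (not from the table): the three pieces concatenate
# back into the original file text.
example_row = {
    "prefix": "def add(a, b):\n    ret",
    "middle": "urn a ",
    "suffix": "+ b\n",
}
assert reassemble(example_row) == "def add(a, b):\n    return a + b\n"
```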
aroquemaurel/Cuis-In
|
cuisin/tasting/models.py
|
Python
|
gpl-2.0
| 1,752
| 0.001712
|
from django.db import models
class TastingCategory(models.Model):
title = models.CharField(max_length=128)
singularTitle = models.CharField(max_length=128)
slug = models.SlugField(max_length=128)
def __unicode__(self):
return self.title
class Tasting(models.Model):
category = models.ForeignKey('TastingCategory', on_delete=models.CASCADE)
slug = models.SlugField(max_length=128)
name = models.CharField(max_length=128)
flair = models.TextField()
mouth = models.TextField()
color = models.TextField()
note = models.IntegerField()
date = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name="Date d'ajout")
def __unicode__(self):
return self.category.title + " " + self.name
class WhiskyType(models.Model):
type = models.CharField(m
|
ax_length=128)
def __unicode__(self):
return self.type
class CoffeeCountry(models.Model):
country = models.CharField(max_length=128)
def __unicode__(self):
return self.country
class Whisky(Tasting):
old = models.IntegerField()
type = models.ForeignKey('WhiskyType', on_delete=models.CASCADE)
degAlcool = models.IntegerField()
def __str__(self):
return self.type.type + " " + self.name + " " + str(self.old) + " ans"
|
class Coffee(Tasting):
country = models.ForeignKey('CoffeeCountry', on_delete=models.CASCADE)
altitude = models.IntegerField()
strength = models.IntegerField()
def __str__(self):
return self.category.title + " " + self.country.country + " " + self.name
class Wine(Tasting):
year = models.IntegerField()
degAlcool = models.IntegerField()
def __str__(self):
return self.category.title + " " + self.name
|
pusnik/django-libtech-emailuser
|
emailuser/models.py
|
Python
|
bsd-3-clause
| 3,681
| 0.002717
|
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import send_mail
from django.db import models
from django.utils import timezone
from django.utils.http import urlquote
from django.utils.translation import ugettext_lazy as _
import warnings
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser, PermissionsMixin
)
class EmailUserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""
Creates and saves a User with the giv
|
en username, email and password.
"""
#assert False, "in user manager"
now = timezone.now()
if not email:
raise ValueError('The given email must be set')
#email = UserManager.normalize_email(email)
user = EmailUser(email=email,
is_staff=False, is_active=True, is_superuser=False,
last_login=now, date_joined=now, **extra_fields)
user.set_password(passw
|
ord)
user.save(using=self._db)
return user
def create_superuser(self, email, password, **extra_fields):
u = self.create_user(email, password, **extra_fields)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
class EmailUser(AbstractBaseUser, PermissionsMixin):
"""
An abstract base class implementing a fully featured User model with
admin-compliant permissions.
Username, password and email are required. Other fields are optional.
"""
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('email address'), unique=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
objects = EmailUserManager()
USERNAME_FIELD = 'email'
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
#abstract = True,cc
def get_absolute_url(self):
return "/users/%s/" % urlquote(self.username)
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def email_user(self, subject, message, from_email=None):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email])
def get_profile(self):
"""
Returns site-specific profile for this user. Raises
SiteProfileNotAvailable if this site does not allow profiles.
"""
warnings.warn(
"The use of AUTH_PROFILE_MODULE to define user profiles has been deprecated.",
DeprecationWarning)
class PasswordReset(models.Model):
user = models.OneToOneField(EmailUser, related_name="profile")
key = models.CharField(max_length=100)
used = models.BooleanField(default=False)
|
willkg/postatus
|
postatus/status.py
|
Python
|
bsd-3-clause
| 4,559
| 0.001535
|
import requests
class Status(object):
SKIP_LOCALES = ['en_US']
def __init__(self, url, app=None, highlight=None):
self.url = url
self.app = app
self.highlight = highlight or []
self.data = []
self.created = None
def get_data(self):
if self.data:
return
resp = requests.get(self.url)
if resp.status_code != 200:
resp.raise_for_status()
self.data = resp.json()
self.created = self.data[-1]['created']
def summary(self):
"""Generates summary data of today's state"""
self.get_data()
highlight = self.highlight
last_item = self.data[-1]
output = {}
output['app'] = self.app or 'ALL'
data = last_item['locales']
if self.app:
get_item = lambda x: x['apps'][self.app]
else:
get_item = lambda x: x
apps = data.items()[0][1]['apps'].keys()
apps.sort()
output['apps'] = apps
items = [item for item in data.items() if item[0] not in highlight]
hitems = [item for item in data.items() if item[0] in highlight]
highlighted = []
if hitems:
for loc, loc_data in sorted(hitems, key=lambda x: -x[1]['percent']):
if loc in self.SKIP_LOCALES:
continue
item = get_item(loc_data)
total = item.get('total', -1)
translated = item.get('translated', -1)
percent = item.get('percent', -1)
untranslated_words = item.get('untranslated_words', -1)
highlighted.appe
|
nd({
'locale': loc,
'percent': percent,
'total': total,
'translated': translated,
'untranslated': total - translated,
'untranslated_words': untranslated_words
|
})
output['highlighted'] = highlighted
locales = []
for loc, loc_data in sorted(items, key=lambda x: -x[1]['percent']):
if loc in self.SKIP_LOCALES:
continue
item = get_item(loc_data)
total = item.get('total', -1)
translated = item.get('translated', -1)
percent = item.get('percent', -1)
untranslated_words = item.get('untranslated_words', -1)
locales.append({
'locale': loc,
'percent': percent,
'total': total,
'translated': translated,
'untranslated': total - translated,
'untranslated_words': untranslated_words
})
output['locales'] = locales
output['created'] = self.created
return output
def _mark_movement(self, data):
"""For each item, converts to a tuple of (movement, item)"""
ret = []
prev_day = None
for i, day in enumerate(data):
if i == 0:
ret.append(('', day))
prev_day = day
continue
if prev_day > day:
item = ('down', day)
elif prev_day < day:
item = ('up', day)
else:
item = ('equal', day)
prev_day = day
ret.append(item)
return ret
def history(self):
self.get_data()
data = self.data
highlight = self.highlight
app = self.app
# Get a list of the locales we'll iterate through
locales = sorted(data[-1]['locales'].keys())
num_days = 14
# Truncate the data to what we want to look at
data = data[-num_days:]
if app:
get_data = lambda x: x['apps'][app]['percent']
else:
get_data = lambda x: x['percent']
hlocales = [loc for loc in locales if loc in highlight]
locales = [loc for loc in locales if loc not in highlight]
output = {}
output['app'] = self.app or 'All'
output['headers'] = [item['created'] for item in data]
output['highlighted'] = sorted(
(loc, self._mark_movement(get_data(day['locales'][loc]) for day in data))
for loc in hlocales
)
output['locales'] = sorted(
(loc, self._mark_movement(get_data(day['locales'].get(loc, {'percent': 0.0})) for day in data))
for loc in locales
)
output['created'] = self.created
return output
|
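The `_mark_movement` docstring above says each item becomes a `(movement, item)` tuple relative to the previous day. A standalone re-implementation of that loop, outside the class, showing the expected output shape:

```python
def mark_movement(data):
    """For each item, convert to a (movement, item) tuple vs. the previous one."""
    ret = []
    prev = None
    for i, day in enumerate(data):
        if i == 0:
            ret.append(('', day))
        elif prev > day:
            ret.append(('down', day))
        elif prev < day:
            ret.append(('up', day))
        else:
            ret.append(('equal', day))
        prev = day
    return ret

assert mark_movement([80.0, 82.5, 82.5, 79.0]) == [
    ('', 80.0), ('up', 82.5), ('equal', 82.5), ('down', 79.0)]
```

Separately, `summary()` leans on Python 2 dict behavior (`data.items()[0]` and list `.sort()`); under Python 3 that line would be roughly `apps = sorted(next(iter(data.values()))['apps'])`.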
eneldoserrata/marcos_openerp
|
addons/report_xls/report_xls.py
|
Python
|
agpl-3.0
| 9,067
| 0.002207
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2013 Noviat nv/sa (www.noviat.com). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import xlwt
from xlwt.Style import default_style
import cStringIO
from datetime import datetime
from openerp.osv.fields import datetime as datetime_field
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
import inspect
from types import CodeType
from openerp.report.report_sxw import *
from openerp import pooler
from openerp.tools.translate import translate, _
import logging
_logger = logging.getLogger(__name__)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class report_xls(report_sxw):
xls_types = {
'bool': xlwt.Row.set_cell_boolean,
'date': xlwt.Row.set_cell_date,
'text': xlwt.Row.set_cell_text,
'number': xlwt.Row.set_cell_number,
}
xls_types_default = {
'bool': False,
'date': None,
'text': '',
'number': 0,
}
# TO DO: move parameters infra to configurable data
# header/footer
hf_params = {
'font_size': 8,
'font_style': 'I', # B: Bold, I: Italic, U: Underline
}
# styles
_pfc = '26' # default pattern fore_color
_bc = '22' # borders color
decimal_format = '#,##0.00'
date_format = 'YYYY-MM-DD'
xls_styles = {
'xls_title': 'font: bold true, height 240;',
'bold': 'font: bold true;',
'underline': 'font: underline true;',
'italic': 'font: italic true;',
'fill': 'pattern: pattern solid, fore_color %s;' % _pfc,
'fill_blue': 'pattern: pattern solid, fore_color 27;',
'fill_grey': 'pattern: pattern solid, fore_color 22;',
'borders_all': 'borders: left thin, right thin, top thin, bottom thin, '
'left_colour %s, right_colour %s, top_colour %s, bottom_colour %s;' % (_bc, _bc, _bc, _bc),
'left': 'align: horz left;',
'center': 'align: horz center;',
'right': 'align: horz right;',
'wr
|
ap': 'align: wrap true;',
'top': 'align: vert top;',
'bottom': 'align: vert bottom;',
}
# TO DO: move parameters supra to configurable data
def creat
|
e(self, cr, uid, ids, data, context=None):
self.pool = pooler.get_pool(cr.dbname)
self.cr = cr
self.uid = uid
report_obj = self.pool.get('ir.actions.report.xml')
report_ids = report_obj.search(cr, uid,
[('report_name', '=', self.name[7:])], context=context)
if report_ids:
report_xml = report_obj.browse(cr, uid, report_ids[0], context=context)
self.title = report_xml.name
if report_xml.report_type == 'xls':
return self.create_source_xls(cr, uid, ids, data, context)
elif context.get('xls_export'):
self.table = data.get('model') or self.table # use model from 'data' when no ir.actions.report.xml entry
return self.create_source_xls(cr, uid, ids, data, context)
return super(report_xls, self).create(cr, uid, ids, data, context)
def create_source_xls(self, cr, uid, ids, data, context=None):
if not context:
context = {}
parser_instance = self.parser(cr, uid, self.name2, context)
self.parser_instance = parser_instance
objs = self.getObjects(cr, uid, ids, context)
parser_instance.set_context(objs, data, ids, 'xls')
objs = parser_instance.localcontext['objects']
n = cStringIO.StringIO()
wb = xlwt.Workbook(encoding='utf-8')
_p = AttrDict(parser_instance.localcontext)
_xs = self.xls_styles
self.xls_headers = {
'standard': '',
}
report_date = datetime_field.context_timestamp(cr, uid, datetime.now(), context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
self.xls_footers = {
'standard': ('&L&%(font_size)s&%(font_style)s' + report_date +
'&R&%(font_size)s&%(font_style)s&P / &N') % self.hf_params,
}
self.generate_xls_report(_p, _xs, data, objs, wb)
wb.save(n)
n.seek(0)
return (n.read(), 'xls')
def render(self, wanted, col_specs, rowtype, render_space='empty'):
"""
returns 'evaluated' col_specs
Input:
- wanted: element from the wanted_list
- col_specs : cf. specs[1:] documented in xls_row_template method
- rowtype : 'header' or 'data'
- render_space : type dict, (caller_space + localcontext) if not specified
"""
if render_space == 'empty':
render_space = {}
caller_space = inspect.currentframe().f_back.f_back.f_locals
localcontext = self.parser_instance.localcontext
render_space.update(caller_space)
render_space.update(localcontext)
row = col_specs[wanted][rowtype][:]
for i in range(len(row)):
if isinstance(row[i], CodeType):
row[i] = eval(row[i], render_space)
row.insert(0, wanted)
#_logger.warn('row O = %s', row)
return row
def generate_xls_report(self, parser, xls_styles, data, objects, wb):
""" override this method to create your excel file """
raise NotImplementedError()
def xls_row_template(self, specs, wanted_list):
"""
Returns a row template.
Input :
- 'wanted_list': list of Columns that will be returned in the row_template
- 'specs': list with Column Characteristics
0: Column Name (from wanted_list)
1: Column Colspan
2: Column Size (unit = the width of the character '0' as it appears in the sheet's default font)
3: Column Type
4: Column Data
5: Column Formula (or 'None' for Data)
6: Column Style
"""
r = []
col = 0
for w in wanted_list:
found = False
for s in specs:
if s[0] == w:
found = True
s_len = len(s)
c = list(s[:5])
# set write_cell_func or formula
if s_len > 5 and s[5] is not None:
c.append({'formula': s[5]})
else:
c.append({'write_cell_func': report_xls.xls_types[c[3]]})
# Set custom cell style
if s_len > 6 and s[6] is not None:
c.append(s[6])
else:
c.append(None)
# Set cell formula
if s_len > 7 and s[7] is not None:
c.append(s[7])
else:
c.append(None)
r.append((col, c[1], c))
col += c[1]
break
if not found:
_logger.warn("report_xls.xls_row_template, column '%s' not found in specs", w)
return r
def xls_write_row(self, ws, row_pos, row_data, row_style=default_style, set_column_size=False):
r = ws.row(row_pos)
for col, size, spec in row_data:
data = spec[4]
formul
|
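The `xls_styles` dictionary above stores xlwt style strings (the `easyxf` mini-language) and `decimal_format` a number format. A minimal standalone xlwt sketch, separate from the `report_xls` class, showing how such strings are typically turned into cell styles (assumes the `xlwt` package is installed):

```python
import xlwt

# Parse style strings like the entries in xls_styles above.
bold_center = xlwt.easyxf('font: bold true; align: horz center;')
amount_style = xlwt.easyxf('align: horz right;', num_format_str='#,##0.00')

wb = xlwt.Workbook(encoding='utf-8')
ws = wb.add_sheet('demo')
ws.write(0, 0, 'Title', bold_center)   # styled text cell
ws.write(1, 0, 1234.5, amount_style)   # number cell using the decimal format
wb.save('demo.xls')                    # writes a small .xls file
```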
grevutiu-gabriel/sympy
|
sympy/concrete/summations.py
|
Python
|
bsd-3-clause
| 24,412
| 0.001106
|
from __future__ import print_function, division
from sympy.concrete.expr_with_limits import AddWithLimits
from sympy.concrete.expr_with_intlimits import ExprWithIntLimits
from sympy.core.function import Derivative
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Wild)
from sympy.concrete.gosper import gosper_sum
from sympy.functions.elementary.piecewise import Piecewise
from sympy.polys import apart, PolynomialError
from sympy.solvers import solve
from sympy.core.compatibility import range
from sympy.tensor.indexed import Idx
class Sum(AddWithLimits, ExprWithIntLimits):
r"""Represents unevaluated summation.
``Sum`` represents a finite or infinite series, with the first argument
being the general form of terms in the series, and the second argument
being ``(dummy_variable, start, end)``, with ``dummy_variable`` taking
all integer values from ``start`` through ``end``. In accordance with
long-standing mathematical convention, the end term is included in the
summation.
Finite sums
===========
For finite sums (and sums with symbolic limits assumed to be finite) we
follow the summation convention described by Karr [1], especially
definition 3 of section 1.4. The sum:
.. math::
\sum_{m \leq i < n} f(i)
has *the obvious meaning* for `m < n`, namely:
.. math::
\sum_{m \leq i < n} f(i) = f(m) + f(m+1) + \ldots + f(n-2) + f(n-1)
with the upper limit value `f(n)` excluded. The sum over an empty set is
zero if and only if `m = n`:
.. math::
\sum_{m \leq i < n} f(i) = 0 \quad \mathrm{for} \quad m = n
Finally, for all other sums over empty sets we assume the following
definition:
.. math::
\sum_{m \leq i < n} f(i) = - \sum_{n \leq i < m} f(i) \quad \mathrm{for} \quad m > n
It is important to note that Karr defines all sums with the upper
limit being exclusive. This is in contrast to the usual mathematical notation,
but does not affect the summation convention. Indeed we have:
.. math::
\sum_{m \leq i < n} f(i) = \sum_{i = m}^{n - 1} f(i)
where the difference in notation is intentional to emphasize the meaning,
with limits typeset on the top being inclusive.
Examples
========
>>> from sympy.abc import i, k, m, n, x
>>> from sympy import Sum, factorial, oo, IndexedBase, Function
>>> Sum(k, (k, 1, m))
Sum(k, (k, 1, m))
>>> Sum(k, (k, 1, m)).doit()
m**2/2 + m/2
>>> Sum(k**2, (k, 1, m))
Sum(k**2, (k, 1, m))
>>> Sum(k**2, (k, 1, m)).doit()
m**3/3 + m**2/2 + m/6
>>> Sum(x**k, (k, 0, oo))
Sum(x**k, (k, 0, oo))
>>> Sum(x**k, (k, 0, oo)).doit()
Piecewise((1/(-x + 1), Abs(x) < 1), (Sum(x**k, (k, 0, oo)), True))
>>> Sum(x**k/factorial(k), (k, 0, oo)).doit()
exp(x)
Here are examples to do summation with symbolic indices. You
can use either Function or IndexedBase classes:
>>> f = Function('f')
>>> Sum(f(n), (n, 0, 3)).doit()
f(0) + f(1) + f(2) + f(3)
>>> Sum(f(n), (n, 0, oo)).doit()
Sum(f(n), (n, 0, oo))
>>> f = IndexedBase('f')
>>> Sum(f[n]**2, (n, 0, 3)).doit()
f[0]**2 + f[1]**2 + f[2]**2 + f[3]**2
An example showing that the symbolic result of a summation is still
valid for seemingly nonsensical values of the limits. Then the Karr
convention allows us to give a perfectly valid interpretation to
those sums by interchanging the limits according to the above rules:
>>> S = Sum(i, (i, 1, n)).doit()
>>> S
n**2/2 + n/2
>>> S.subs(n, -4)
6
>>> Sum(i, (i, 1, -4)).doit()
6
>>> Sum(-i, (i, -3, 0)).doit()
6
An explicit example of the Karr summation convention:
>>> S1 = Sum(i**2, (i, m, m+n-1)).doit()
>>> S1
m**2*n + m*n**2 - m*n + n**3/3 - n**2/2 + n/6
>>> S2 = Sum(i**2, (i, m+n, m-1)).doit()
>>> S2
-m**2*n - m*n**2 + m*n - n**3/3 + n**2/2 - n/6
>>> S1 + S2
0
>>> S3 = Sum(i, (i, m, m-1)).doit()
>>> S3
0
See Also
========
summation
Product, product
References
==========
.. [1] Michael Karr, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
.. [2] http://en.wikipedia.org/wiki/Summation#Capital-sigma_notation
.. [3] http://en.wikipedia.org/wiki/Empty_sum
"""
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
obj = AddWithLimits.__new__(cls, function, *symbols, **assumptions)
if not hasattr(obj, 'limits'):
return obj
if any(len(l) != 3 or None in l for l in obj.limits):
raise ValueError('Sum requires values for lower and upper bounds.')
return obj
def _eval_is_zero(self):
# a Sum is only zero if its function is zero or if all terms
# cancel out. This only answers whether the summand is zero; if
# not then None is returned since we don't analyze whether all
# terms cancel out.
if self.function.is_zero:
return True
def doit(self, **hints):
if hints.get('deep', True):
f = self.function.doit(**hints)
else:
f = self.function
if self.function.is_Matrix:
return self.expand().doit()
for n, limit in enumerate(self.limits):
i, a, b = limit
dif = b - a
if dif.is_integer and (dif < 0) == True:
a, b = b + 1, a - 1
f = -f
if isinstance(i, Idx):
i = i.label
newf = eval_sum(f, (i, a, b))
if newf is None:
if f == self.function:
return self
else:
return self.func(f, *self.limits[n:])
f = newf
if hints.get('deep', True):
# eval_sum could return partially unevaluated
# result with Piecewise. In this case we won't
# doit() recursively.
if not isinstance(f, Piecewise):
return f.doit(**hi
|
nts)
return f
def _eval_derivative(self, x):
"""
Differentiate wrt x as long as x is not in the free symbols of any of
the upper or lower limits.
Sum(a*b*x, (x, 1, a)) can be differentiated wrt x or b but not `a`
since the value of the sum is discontinuous in `a`. In a case
involving a limit variable, the unevaluated derivative is retur
|
ned.
"""
# diff already confirmed that x is in the free symbols of self, but we
# don't want to differentiate wrt any free symbol in the upper or lower
# limits
# XXX remove this test for free_symbols when the default _eval_derivative is in
if x not in self.free_symbols:
return S.Zero
# get limits and the function
f, limits = self.function, list(self.limits)
limit = limits.pop(-1)
if limits: # f is the argument to a Sum
f = self.func(f, *limits)
if len(limit) == 3:
_, a, b = limit
if x in a.free_symbols or x in b.free_symbols:
return None
df = Derivative(f, x, evaluate=True)
rv = self.func(df, limit)
if limit[0] not in df.free_symbols:
rv = rv.doit()
return rv
else:
return NotImplementedError('Lower and upper bound expected.')
def _eval_simplify(self, ratio, measure):
from sympy.simplify.simplify import sum_simplify
return sum_simplify(self)
def _eval_summation(self, f, x):
return None
def euler_maclaurin(self, m=0, n=0, eps=0, eval_integral=True):
"""
Return an Euler-Maclaurin approximation of self, where m is the
number of leading terms to sum directly and n is the number of
terms in the tail.
With m = n = 0, this is simply the corresponding integral
plus a first-order endpoint c
|
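The docstring above demonstrates the Karr summation convention with `S1` and `S2` cancelling exactly. A runnable restatement of that check, mirroring the doctest (requires SymPy):

```python
from sympy import Sum
from sympy.abc import i, m, n

# Karr convention: summing i**2 over (m .. m+n-1) and then over the
# "reversed" range (m+n .. m-1) cancels exactly, as in the docstring.
S1 = Sum(i**2, (i, m, m + n - 1)).doit()
S2 = Sum(i**2, (i, m + n, m - 1)).doit()
assert S1 + S2 == 0
```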
dtg01100/edi-to-csv-converter
|
button.py
|
Python
|
gpl-3.0
| 224
| 0.03125
|
import Tkinter
import tkMessageBox
top = Tkinter.Tk()
def helloCallBack():
tkMessageBox.showinfo( "Hell
|
o Python", "Hello World")
B = Tkinter.Button(top, text ="Hello", command = helloCallBack)
B.pack()
top.mainlo
|
op()
|
jmolloy/pedigree
|
scripts/buildLibc.py
|
Python
|
isc
| 2,327
| 0.005161
|
#! /usr/bin/env python
# coding=utf8
#
# Copyright (c) 2008 James Molloy, Jörg Pfähler, Matthew Iselin
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT
|
, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import tempfile
import shutil
import os
def doLibc(builddir, inputLibcA, glue_name, pedigree_c_name, ar, cc, libgcc):
print "Building libc..."
tmpdir = tempfile.mkdtemp()
buildOut = builddir + "/libc"
olddir
|
= os.getcwd()
os.chdir(tmpdir)
shutil.copy(inputLibcA, tmpdir + "/libc.a")
os.system(ar + " x libc.a")
glue = glue_name
shutil.copy(glue, tmpdir + "/" + os.path.basename(glue_name))
shutil.copy(pedigree_c_name, tmpdir + "/" + os.path.basename(pedigree_c_name))
objs_to_remove = ["init", "getpwent", "signal", "fseek", "getcwd", "rename", "rewinddir", "opendir", "readdir", "closedir", "_isatty", "basename", "setjmp"]
for i in objs_to_remove:
try:
os.remove("lib_a-" + i + ".o")
except:
continue
res = os.system(ar + " x " + os.path.basename(glue_name))
if res != 0:
print " (failed)"
exit(res)
res = os.system(cc + " -nostdlib -shared -Wl,-shared -Wl,-soname,libc.so -o " + buildOut + ".so *.obj *.o -L. -lpedigree-c -lgcc")
if res != 0:
print " (failed)"
exit(res)
res = os.system(ar + " cru " + buildOut + ".a *.o *.obj")
if res != 0:
print " (failed)"
os.unlink("%s.so" % (buildOut,))
exit(res)
for i in os.listdir("."):
os.remove(i)
os.chdir(olddir)
os.rmdir(tmpdir)
import sys
doLibc(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], "")
|
kshimo69/IkaLog
|
ikalog/outputs/alive_squids_csv.py
|
Python
|
apache-2.0
| 3,445
| 0.001462
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This module is still in proof of concept, and subject to change.
#
from datetime import datetime
# IkaLog Output Plugin: Write 'Alive Squids' CSV data
#
class AliveSquidsCSV(object):
##
# Write a line to text file.
# @param self The Object Pointer.
# @param record Record (text)
#
def write_record(self, file, record):
try:
csv_file = open(file, "a")
csv_file.write(record)
csv_file.close()
except:
print("CSV: Failed to write CSV File")
def write_alive_squids_csv(self, context, basename="ikabattle_log", debug=False):
csv = ["tick,y\n", "tick,y\n"]
for sample in context['game']['livesTrack']:
if debug:
print('lives sample = %s', sample)
time = sample[0]
del sample[0]
num_team = 0
for team in sample:
num_squid = 0
for alive in team:
num_squid = num_squid + 1
if alive:
csv[num_team] = "%s%d, %d\n" % (
csv[num_team], time, num_squid)
num_team = num_team + 1
num_team = 0
t = datetime.now()
t_str = t.strftime("%Y%m%d_%H%M")
for f in csv:
self.write_record('%s/%s_team%d.csv' %
(self.dest_dir, basename, num_team), f)
num_team = num_team + 1
def write_flags_csv(self, context, basename="ikabattle_log", debug=False):
# Do not write anything when there is no data
if len(context['game']['towerTrack']) == 0:
return
csv = "tick,pos,max,min\n"
for sample in context['game']['towerTrack']:
if debu
|
g:
print('tower sample = %s', sample)
time = sample[0]
samp
|
le = sample[1]
csv = "%s%d, %d, %d, %d\n" % (
csv, time, sample['pos'], sample['max'], sample['min'])
self.write_record('%s/%s_tower.csv' % (self.dest_dir, basename), csv)
##
# on_game_individual_result Hook
# @param self The Object Pointer
# @param context IkaLog context
#
def on_game_individual_result(self, context):
t = datetime.now()
basename = t.strftime("ikabattle_log_%Y%m%d_%H%M")
self.write_alive_squids_csv(context, basename=basename, debug=self.debug)
self.write_flags_csv(context, basename=basename, debug=self.debug)
##
# Constructor
# @param self The Object Pointer.
# @param dest_dir Destination directory (relative or absolute path)
def __init__(self, dir='./log/', debug=False):
self.dest_dir = dir
self.debug = debug
|
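`write_record` above opens and closes the CSV file by hand and swallows every error with a bare `except`. An equivalent standalone sketch using a context manager and a narrower exception, otherwise matching the original behavior:

```python
def write_record(path, record):
    """Append one record (text) to a CSV file."""
    try:
        with open(path, "a") as csv_file:   # closed automatically
            csv_file.write(record)
    except OSError as exc:
        print("CSV: Failed to write CSV file: %s" % exc)
```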
SimenB/thefuck
|
tests/rules/test_git_push_without_commits.py
|
Python
|
mit
| 659
| 0
|
import pytest
from thefuck.types import C
|
ommand
from thefuck.rules.git_push_without_commits import (
fix,
get_new_command,
match,
)
command = 'git push -u origin master'
expected_error = '''
error: src refspec master does not match any.
error: failed to push some refs to 'git@github.com:User/repo.git'
'''
@pytest.mark.parametrize('command', [Command(command, expected_error)])
def test_match(command):
assert match(command)
@pytest.mark.pa
|
rametrize('command, result', [(
Command(command, expected_error),
fix.format(command=command),
)])
def test_get_new_command(command, result):
assert get_new_command(command) == result
|
lordscales91/blender_mmd_tools
|
mmd_tools/properties/bone.py
|
Python
|
mit
| 4,418
| 0.001132
|
# -*- coding: utf-8 -*-
from bpy.types import PropertyGroup
from bpy.props import StringProperty, IntProperty, BoolProperty, FloatProperty, FloatVectorProperty
from mmd_tools.core.bone import FnBone
def _updateMMDBoneAdditionalTransform(prop, context):
prop['is_additional_transform_dirty'] = True
p_bone = context.active_pose_bone
if p_bone and p_bone.mmd_bone.as_pointer() == prop.as_pointer():
FnBone.apply_additional_transformation(prop.id_data)
def _getAdditionalTransformBone(prop):
arm = prop.id_data
bone_id = prop.get('additional_transform_bone_id', -1)
if bone_id < 0:
return ''
fnBone = FnBone.from_bone_id(arm, bone_id)
if not fnBone:
return ''
return fnBone.pose_bone.name
def _setAdditionalTransformBone(prop, value):
arm = prop.id_data
prop['is_additional_transform_dirty'] = True
if value not in arm.pose.bones.keys():
prop['additional_transform_bone_id'] = -1
return
pose_bone = arm.pose.bones[value]
bone = FnBone(pose_bone)
prop['additional_transform_bone_id'] = bone.bone_id
class MMDBone(PropertyGroup):
name_j = StringProperty(
name='Name',
description='Japanese Name',
default='',
)
name_e = StringProperty(
name='Name(Eng)',
description='English Name',
default='',
)
bone_id = IntProperty(
name='Bone ID',
default=-1,
)
transform_order = IntProperty(
name='Transform Order',
description='Deformation tier',
min=0,
max=100,
)
is_visible = BoolProperty(
name='Visible',
description='Is visible',
default=True,
)
is_controllable = BoolProperty(
name='Controllable',
description='Is controllable',
default=True,
)
transform_after_dynamics = BoolProperty(
name='After Dynamics',
description='After physics',
default=False,
)
enabled_fixed_axis = BoolProperty(
name='Fixed Axis',
description='Use fixed axis',
default=False,
)
fixed_axis = FloatVectorProperty(
name='Fixed Axis',
description='Fixed axis',
subtype='XYZ',
size=3,
default=[0, 0, 0],
)
enabled_local_axes = BoolProperty(
name='Local Axes',
description='Use local axes',
default=False,
)
local_axis_x = FloatVectorProperty(
name='Local X-Axis',
description='Local x-axis',
subtype='XYZ',
size=3,
default=[1, 0, 0],
)
local_axis_z = FloatVectorProperty(
name='Local Z-Axis',
|
description='Local z-axis',
subtype='XYZ',
size=3,
default=[0, 0, 1],
)
is_tip = BoolProperty(
name='Tip Bone',
description='Is zero length bone',
default=False,
)
ik_rotation_constraint = FloatProperty(
name='IK Rotation Constraint',
description='The unit angle of IK',
subtype='ANGLE',
soft_min=0,
soft_max=4,
default=1,
)
has_
|
additional_rotation = BoolProperty(
name='Additional Rotation',
description='Additional rotation',
default=False,
update=_updateMMDBoneAdditionalTransform,
)
has_additional_location = BoolProperty(
name='Additional Location',
description='Additional location',
default=False,
update=_updateMMDBoneAdditionalTransform,
)
additional_transform_bone = StringProperty(
name='Additional Transform Bone',
description='Additional transform bone',
set=_setAdditionalTransformBone,
get=_getAdditionalTransformBone,
update=_updateMMDBoneAdditionalTransform,
)
additional_transform_bone_id = IntProperty(
name='Additional Transform Bone ID',
default=-1,
update=_updateMMDBoneAdditionalTransform,
)
additional_transform_influence = FloatProperty(
name='Additional Transform Influence',
description='Additional transform influence',
default=1,
soft_min=-1,
soft_max=1,
update=_updateMMDBoneAdditionalTransform,
)
is_additional_transform_dirty = BoolProperty(
name='',
default=True
)
|
ntt-sic/neutron
|
neutron/plugins/ml2/drivers/mech_openvswitch.py
|
Python
|
apache-2.0
| 2,288
| 0
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.common import constants
from neutron.ext
|
ensions import portbindings
from neutron.openstack.common import log
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import mech_agent
LOG = log.getLogger(__name__)
class OpenvswitchMechanismDriver(mech_agent.AgentMechanismDriverBase):
"""Attach to networks using openvswitch L2 agent.
The OpenvswitchMech
|
anismDriver integrates the ml2 plugin with the
openvswitch L2 agent. Port binding with this driver requires the
openvswitch agent to be running on the port's host, and that agent
to have connectivity to at least one segment of the port's
network.
"""
def __init__(self):
super(OpenvswitchMechanismDriver, self).__init__(
constants.AGENT_TYPE_OVS,
portbindings.VIF_TYPE_OVS,
True)
def check_segment_for_agent(self, segment, agent):
mappings = agent['configurations'].get('bridge_mappings', {})
tunnel_types = agent['configurations'].get('tunnel_types', [])
LOG.debug(_("Checking segment: %(segment)s "
"for mappings: %(mappings)s "
"with tunnel_types: %(tunnel_types)s"),
{'segment': segment, 'mappings': mappings,
'tunnel_types': tunnel_types})
network_type = segment[api.NETWORK_TYPE]
if network_type == 'local':
return True
elif network_type in tunnel_types:
return True
elif network_type in ['flat', 'vlan']:
return segment[api.PHYSICAL_NETWORK] in mappings
else:
return False
|
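`check_segment_for_agent` above is a small decision table over the network type, the agent's tunnel types, and its bridge mappings. A standalone sketch of the same logic with a few illustrative inputs (the mapping values are made up):

```python
def check_segment(network_type, physical_network, mappings, tunnel_types):
    """Mirror of the decision logic in check_segment_for_agent above."""
    if network_type == 'local':
        return True
    elif network_type in tunnel_types:
        return True
    elif network_type in ['flat', 'vlan']:
        return physical_network in mappings
    else:
        return False

assert check_segment('vlan', 'physnet1', {'physnet1': 'br-eth1'}, []) is True
assert check_segment('gre', None, {}, ['gre', 'vxlan']) is True
assert check_segment('vxlan', None, {}, []) is False
```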
klangner/iclogger
|
src/dev_server.py
|
Python
|
bsd-2-clause
| 223
| 0.004484
|
# -*- coding: utf-8 -*-
'''
Created on 2014-04-09
@author: Krzysztof Langner
''
|
'
# import iclogger.file_logger as Logger
import iclogger.dynamodb_logger as Logger
if __name__ == "__main__":
Logger.app.run(de
|
bug=True)
|
daniellawrence/external_naginator
|
tests/test_basic.py
|
Python
|
mit
| 341
| 0.002933
|
import unittest
c
|
lass TestGenearate(unittest.TestCase):
def setUp(self):
self.seq = range(10)
def test_smoke(self):
"Basic smoke test that should pickup any silly errors"
import external_naginator
external_naginator.__name__ == "external_naginator"
if __name__ == '
|
__main__':
unittest.main()
|
Jan-zou/LeetCode
|
python/Array/53_maximum_subarray.py
|
Python
|
mit
| 942
| 0.003297
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Description:
Find the contiguous subarray within an array (containing at least one number) which has the largest sum.
For example, given the array [−2,1,−3,4,−1,2,1,−5,4],
the contiguous subarr
|
ay [4,−1,2,1] has the largest sum = 6.
More practice:
If you have figured out the O(n) solution,
try coding another solution using the divide and conquer approach, which is more subtle.
Tags: Array, Dynami
|
c Programming, Divide and Conquer
'''
class Solution(object):
# O(n) runtime; O(1) space - local optimum and global optimum solution
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
global_max, local_max = float("-inf"), 0
for i in nums:
local_max = max(i, i+local_max)
global_max = max(local_max, global_max)
return global_max
# Divide and Conquer
|
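A self-contained walk-through of the same local/global-optimum update on the docstring's example array, confirming the stated answer of 6:

```python
nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]   # example from the problem statement
global_max, local_max = float("-inf"), 0
for x in nums:
    local_max = max(x, x + local_max)
    global_max = max(local_max, global_max)
assert global_max == 6   # the subarray [4, -1, 2, 1]
```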
citrix-openstack/horizon
|
horizon/horizon/dashboards/nova/containers/tests.py
|
Python
|
apache-2.0
| 10,798
| 0.000648
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from cloudfiles.errors import ContainerNotEmpty
from django import http
from django import template
from django.contrib import messages
from django.core.urlresolvers import reverse
from mox import IgnoreArg, IsA
from horizon import api
from horizon import test
from .tables import ContainersTable, ObjectsTable
CONTAINER_INDEX_URL = reverse('horizon:nova:containers:index')
class ContainerViewTests(test.BaseViewTests):
def setUp(self):
super(ContainerViewTests, self).setUp()
self.container = api.Container(None)
self.container.name = 'containerName'
self.container.size_used = 128
self.containers = (self.container,)
|
def test_index(self):
self.mox.StubOutWithMock(api, 'swift_get_containers')
api.swift_get_containers(
IsA(http.HttpRequest), marker=None).AndReturn(
([self.container], False))
self.mox.ReplayAll()
res = self.client.get(CONTAINER_INDEX_URL)
self.assertTemplateUsed(res, 'nova/containers/index.html')
self.assertIn('table', res.context)
containers = res.c
|
ontext['table'].data
self.assertEqual(len(containers), 1)
self.assertEqual(containers[0].name, 'containerName')
def test_delete_container(self):
self.mox.StubOutWithMock(api, 'swift_delete_container')
api.swift_delete_container(IsA(http.HttpRequest),
'containerName')
self.mox.ReplayAll()
action_string = "containers__delete__%s" % self.container.name
form_data = {"action": action_string}
req = self.factory.post(CONTAINER_INDEX_URL, form_data)
table = ContainersTable(req, self.containers)
handled = table.maybe_handle()
self.assertEqual(handled['location'], CONTAINER_INDEX_URL)
def test_delete_container_nonempty(self):
self.mox.StubOutWithMock(api, 'swift_delete_container')
exception = ContainerNotEmpty('containerNotEmpty')
api.swift_delete_container(
IsA(http.HttpRequest),
'containerName').AndRaise(exception)
self.mox.ReplayAll()
action_string = "containers__delete__%s" % self.container.name
form_data = {"action": action_string}
req = self.factory.post(CONTAINER_INDEX_URL, form_data)
table = ContainersTable(req, self.containers)
handled = table.maybe_handle()
self.assertEqual(handled['location'], CONTAINER_INDEX_URL)
def test_create_container_get(self):
res = self.client.get(reverse('horizon:nova:containers:create'))
self.assertTemplateUsed(res, 'nova/containers/create.html')
def test_create_container_post(self):
formData = {'name': 'containerName',
'method': 'CreateContainer'}
self.mox.StubOutWithMock(api, 'swift_create_container')
api.swift_create_container(
IsA(http.HttpRequest), u'containerName')
self.mox.ReplayAll()
res = self.client.post(reverse('horizon:nova:containers:create'),
formData)
self.assertRedirectsNoFollow(res, CONTAINER_INDEX_URL)
class ObjectViewTests(test.BaseViewTests):
CONTAINER_NAME = 'containerName'
def setUp(self):
class FakeCloudFile(object):
def __init__(self):
self.metadata = {}
def sync_metadata(self):
pass
super(ObjectViewTests, self).setUp()
swift_object = api.swift.SwiftObject(FakeCloudFile())
swift_object.name = "test_object"
swift_object.size = '128'
swift_object.container = api.swift.Container(None)
swift_object.container.name = 'container_name'
self.swift_objects = [swift_object]
def test_index(self):
self.mox.StubOutWithMock(api, 'swift_get_objects')
api.swift_get_objects(
IsA(http.HttpRequest),
self.CONTAINER_NAME,
marker=None).AndReturn((self.swift_objects, False))
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:nova:containers:object_index',
args=[self.CONTAINER_NAME]))
self.assertTemplateUsed(res, 'nova/objects/index.html')
self.assertItemsEqual(res.context['table'].data, self.swift_objects)
def test_upload_index(self):
res = self.client.get(reverse('horizon:nova:containers:object_upload',
args=[self.CONTAINER_NAME]))
self.assertTemplateUsed(res, 'nova/objects/upload.html')
def test_upload(self):
OBJECT_DATA = 'objectData'
OBJECT_FILE = tempfile.TemporaryFile()
OBJECT_FILE.write(OBJECT_DATA)
OBJECT_FILE.flush()
OBJECT_FILE.seek(0)
OBJECT_NAME = 'objectName'
formData = {'method': 'UploadObject',
'container_name': self.CONTAINER_NAME,
'name': OBJECT_NAME,
'object_file': OBJECT_FILE}
self.mox.StubOutWithMock(api, 'swift_upload_object')
api.swift_upload_object(IsA(http.HttpRequest),
unicode(self.CONTAINER_NAME),
unicode(OBJECT_NAME),
OBJECT_DATA).AndReturn(self.swift_objects[0])
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:nova:containers:object_upload',
args=[self.CONTAINER_NAME]))
self.assertContains(res, 'enctype="multipart/form-data"')
res = self.client.post(reverse('horizon:nova:containers:object_upload',
args=[self.CONTAINER_NAME]),
formData)
self.assertRedirectsNoFollow(res,
reverse('horizon:nova:containers:object_index',
args=[self.CONTAINER_NAME]))
def test_delete(self):
self.mox.StubOutWithMock(api, 'swift_delete_object')
api.swift_delete_object(
IsA(http.HttpRequest),
self.CONTAINER_NAME, self.swift_objects[0].name)
self.mox.ReplayAll()
OBJECT_INDEX_URL = reverse('horizon:nova:containers:object_index',
args=[self.CONTAINER_NAME])
action_string = "objects__delete__%s" % self.swift_objects[0].name
form_data = {"action": action_string}
req = self.factory.post(OBJECT_INDEX_URL, form_data)
kwargs = {"container_name": self.CONTAINER_NAME}
table = ObjectsTable(req, self.swift_objects, **kwargs)
handled = table.maybe_handle()
self.assertEqual(handled['location'], OBJECT_INDEX_URL)
def test_download(self):
OBJECT_DATA = 'objectData'
OBJECT_NAME = 'objectName'
self.mox.StubOutWithMock(api, 'swift_get_object_data')
self.mox.StubOutWithMock(api.swift, 'swift_get_object')
api.swift.swift_get_object(IsA(http.HttpRequest),
unicode(self.CONTAINER_NAME),
unicode(OBJECT_NAME)) \
.AndReturn(self.swift_objects[0])
api.swift_get_object_data(IsA(http.HttpRequest),
|
vileopratama/vitech
|
src/addons/calendar/calendar.py
|
Python
|
mit
| 87,632
| 0.004211
|
# -*- coding: utf-8 -*-
import pytz
import re
import time
import openerp
import openerp.service.report
import uuid
import collections
import babel.dates
from werkzeug.exceptions import BadRequest
from datetime import datetime, timedelta
from dateutil import parser
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from openerp import api
from openerp import tools, SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.http
|
import request
from operator import itemgetter
from openerp.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
def calendar_id2real_id(calendar_id=None, with_date=False):
"""
Convert a "virtual/recurring event id" (type string) into a real event id (type int).
E.g. virtual/recurring event id is 4-20091201100000,
|
so it will return 4.
@param calendar_id: id of calendar
@param with_date: if a value is passed to this param it will return dates based on value of withdate + calendar_id
@return: real event id
"""
if calendar_id and isinstance(calendar_id, (basestring)):
res = filter(None, calendar_id.split('-'))
if len(res) == 2:
real_id = res[0]
if with_date:
real_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT, time.strptime(res[1], "%Y%m%d%H%M%S"))
start = datetime.strptime(real_date, DEFAULT_SERVER_DATETIME_FORMAT)
end = start + timedelta(hours=with_date)
return (int(real_id), real_date, end.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
return int(real_id)
return calendar_id and int(calendar_id) or calendar_id
def get_real_ids(ids):
if isinstance(ids, (basestring, int, long)):
return calendar_id2real_id(ids)
if isinstance(ids, (list, tuple)):
return [calendar_id2real_id(id) for id in ids]
class calendar_attendee(osv.Model):
"""
Calendar Attendee Information
"""
_name = 'calendar.attendee'
_rec_name = 'cn'
_description = 'Attendee information'
def _compute_data(self, cr, uid, ids, name, arg, context=None):
"""
Compute data on function fields for attendee values.
@param ids: list of calendar attendee's IDs
@param name: name of field
@return: dictionary of form {id: {'field Name': value'}}
"""
name = name[0]
result = {}
for attdata in self.browse(cr, uid, ids, context=context):
id = attdata.id
result[id] = {}
if name == 'cn':
if attdata.partner_id:
result[id][name] = attdata.partner_id.name or False
else:
result[id][name] = attdata.email or ''
return result
STATE_SELECTION = [
('needsAction', 'Needs Action'),
('tentative', 'Uncertain'),
('declined', 'Declined'),
('accepted', 'Accepted'),
]
_columns = {
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="Status of the attendee's participation"),
'cn': fields.function(_compute_data, string='Common name', type="char", multi='cn', store=True),
'partner_id': fields.many2one('res.partner', 'Contact', readonly="True"),
'email': fields.char('Email', help="Email of Invited Person"),
'availability': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Free/Busy', readonly="True"),
'access_token': fields.char('Invitation Token'),
'event_id': fields.many2one('calendar.event', 'Meeting linked', ondelete='cascade'),
}
_defaults = {
'state': 'needsAction',
}
def copy(self, cr, uid, id, default=None, context=None):
raise UserError(_('You cannot duplicate a calendar attendee.'))
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
"""
Make entry on email and availability on change of partner_id field.
@param partner_id: changed value of partner id
"""
if not partner_id:
return {'value': {'email': ''}}
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context)
return {'value': {'email': partner.email}}
def get_ics_file(self, cr, uid, event_obj, context=None):
"""
Returns iCalendar file for the event invitation.
@param event_obj: event object (browse record)
@return: .ics file content
"""
res = None
def ics_datetime(idate, allday=False):
if idate:
if allday:
return openerp.fields.Date.from_string(idate)
else:
return openerp.fields.Datetime.from_string(idate).replace(tzinfo=pytz.timezone('UTC'))
return False
try:
# FIXME: why isn't this in CalDAV?
import vobject
except ImportError:
return res
cal = vobject.iCalendar()
event = cal.add('vevent')
if not event_obj.start or not event_obj.stop:
raise UserError(_("First you have to specify the date of the invitation."))
event.add('created').value = ics_datetime(time.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
event.add('dtstart').value = ics_datetime(event_obj.start, event_obj.allday)
event.add('dtend').value = ics_datetime(event_obj.stop, event_obj.allday)
event.add('summary').value = event_obj.name
if event_obj.description:
event.add('description').value = event_obj.description
if event_obj.location:
event.add('location').value = event_obj.location
if event_obj.rrule:
event.add('rrule').value = event_obj.rrule
if event_obj.alarm_ids:
for alarm in event_obj.alarm_ids:
valarm = event.add('valarm')
interval = alarm.interval
duration = alarm.duration
trigger = valarm.add('TRIGGER')
trigger.params['related'] = ["START"]
if interval == 'days':
delta = timedelta(days=duration)
elif interval == 'hours':
delta = timedelta(hours=duration)
elif interval == 'minutes':
delta = timedelta(minutes=duration)
trigger.value = delta
valarm.add('DESCRIPTION').value = alarm.name or 'Odoo'
for attendee in event_obj.attendee_ids:
attendee_add = event.add('attendee')
attendee_add.value = 'MAILTO:' + (attendee.email or '')
res = cal.serialize()
return res
def _send_mail_to_attendees(self, cr, uid, ids, email_from=tools.config.get('email_from', False),
template_xmlid='calendar_template_meeting_invitation', force=False, context=None):
"""
Send mail for event invitation to event attendees.
@param email_from: email address for user sending the mail
@param force: If set to True, email will be sent to the user himself. Useful for example for alerts, ...
"""
res = False
if self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_mail', default=False) or context.get("no_mail_to_attendees"):
return res
mail_ids = []
data_pool = self.pool['ir.model.data']
mailmess_pool = self.pool['mail.message']
mail_pool = self.pool['mail.mail']
template_pool = self.pool['mail.template']
local_context = context.copy()
color = {
'needsAction': 'grey',
'accepted': 'green',
'tentative': '#FFFF00',
'declined': 'red'
}
if not isinstance(ids, (tuple, list)):
ids = [ids]
dummy, template_id = data_pool.get_object_reference(cr, uid, 'calendar', template_xmlid)
dummy, act_id = data_pool.get_object_reference(cr, uid, 'calendar', "view_calendar_event_calendar")
local_context
|
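The `calendar_id2real_id` docstring above says a virtual/recurring id such as '4-20091201100000' maps back to the real id 4. A simplified Python 3 sketch of just that parsing step (the `with_date` branch and the falsy-id fallback from the original are omitted):

```python
def virtual_to_real_id(calendar_id):
    """Return the real (integer) event id for a virtual/recurring id string."""
    if isinstance(calendar_id, str):
        parts = [p for p in calendar_id.split('-') if p]
        if len(parts) == 2:
            return int(parts[0])
    return int(calendar_id)

assert virtual_to_real_id('4-20091201100000') == 4
assert virtual_to_real_id(7) == 7
```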
jonaustin/advisoryscan
|
django/django/test/utils.py
|
Python
|
mit
| 6,373
| 0.006433
|
import sys, time
from django.conf import settings
from django.db import connection, transaction, backend
from django.core import management, mail
from django.dispatch import dispatcher
from django.test import signals
from django.template import Template
# The prefix to put on the default database name when creating
# the test database.
TEST_DATABASE_PREFIX = 'test_'
def instrumented_test_render(self, context):
"""An instrumented Template render method, providing a signal
that can be intercepted by the test system Client
"""
dispatcher.send(signal=signals.template_rendered, sender=self, template=self, context=context)
return self.nodelist.render(context)
class TestSMTPConnection(object):
"""A substitute SMTP connection for use during test sessions.
The test connection stores email messages in a dummy outbox,
rather than sending them out on the wire.
"""
def __init__(*args, **kwargs):
pass
def open(self):
"Mock the SMTPConnection open() interface"
pass
def close(self):
"Mock the SMTPConnection close() interface"
pass
def send_messages(self, messages):
"Redirect messages to the dummy outbox"
mail.outbox.extend(messages)
def setup_test_environment():
"""Perform any global pre-test setup. This involves:
- Installing the instrumented test renderer
- Diverting the email sending functions to a test buffer
"""
Template.original_render = Template.render
Template.render = instrumented_test_render
mail.original_SMTPConnection = mail.SMTPConnection
mail.SMTPConnection = TestSMTPConnection
mail.outbox = []
def teardown_test_environment():
"""Perform any global post-test teardown. This involves:
- Restoring the original test renderer
- Restoring the email sending functions
"""
Template.render = Template.original_render
del Template.ori
|
ginal_render
mail.SMTPConnection = mail.original_SMTPConnection
del mail.original_SMTPConnection
del mail.outbox
def _set_autocommit(connection):
"Make sure a connection is in autocommit mode."
if hasattr(connection.connection, "autocommit"):
con
|
nection.connection.autocommit(True)
elif hasattr(connection.connection, "set_isolation_level"):
connection.connection.set_isolation_level(0)
def get_mysql_create_suffix():
suffix = []
if settings.TEST_DATABASE_CHARSET:
suffix.append('CHARACTER SET %s' % settings.TEST_DATABASE_CHARSET)
if settings.TEST_DATABASE_COLLATION:
suffix.append('COLLATE %s' % settings.TEST_DATABASE_COLLATION)
return ' '.join(suffix)
def get_postgresql_create_suffix():
assert settings.TEST_DATABASE_COLLATION is None, "PostgreSQL does not support collation setting at database creation time."
if settings.TEST_DATABASE_CHARSET:
return "WITH ENCODING '%s'" % settings.TEST_DATABASE_CHARSET
return ''
def create_test_db(verbosity=1, autoclobber=False):
if verbosity >= 1:
print "Creating test database..."
# If we're using SQLite, it's more convenient to test against an
# in-memory database.
if settings.DATABASE_ENGINE == "sqlite3":
TEST_DATABASE_NAME = ":memory:"
else:
suffix = {
'postgresql': get_postgresql_create_suffix,
'postgresql_psycopg2': get_postgresql_create_suffix,
'mysql': get_mysql_create_suffix,
'mysql_old': get_mysql_create_suffix,
}.get(settings.DATABASE_ENGINE, lambda: '')()
if settings.TEST_DATABASE_NAME:
TEST_DATABASE_NAME = settings.TEST_DATABASE_NAME
else:
TEST_DATABASE_NAME = TEST_DATABASE_PREFIX + settings.DATABASE_NAME
# Create the test database and connect to it. We need to autocommit
# if the database supports it because PostgreSQL doesn't allow
# CREATE/DROP DATABASE statements within transactions.
cursor = connection.cursor()
_set_autocommit(connection)
try:
cursor.execute("CREATE DATABASE %s %s" % (backend.quote_name(TEST_DATABASE_NAME), suffix))
except Exception, e:
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = raw_input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_DATABASE_NAME)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print "Destroying old test database..."
cursor.execute("DROP DATABASE %s" % backend.quote_name(TEST_DATABASE_NAME))
if verbosity >= 1:
print "Creating test database..."
cursor.execute("CREATE DATABASE %s %s" % (backend.quote_name(TEST_DATABASE_NAME), suffix))
except Exception, e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
connection.close()
settings.DATABASE_NAME = TEST_DATABASE_NAME
management.syncdb(verbosity, interactive=False)
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database.
cursor = connection.cursor()
def destroy_test_db(old_database_name, verbosity=1):
# Unless we're using SQLite, remove the test database to clean up after
# ourselves. Connect to the previous database (not the test database)
# to do so, because it's not allowed to delete a database while being
# connected to it.
if verbosity >= 1:
print "Destroying test database..."
connection.close()
TEST_DATABASE_NAME = settings.DATABASE_NAME
settings.DATABASE_NAME = old_database_name
if settings.DATABASE_ENGINE != "sqlite3":
cursor = connection.cursor()
_set_autocommit(connection)
time.sleep(1) # To avoid "database is being accessed by other users" errors.
cursor.execute("DROP DATABASE %s" % backend.quote_name(TEST_DATABASE_NAME))
connection.close()
|
SpaceVim/SpaceVim
|
bundle/defx.nvim/rplugin/python3/defx/column/size.py
|
Python
|
gpl-3.0
| 1,686
| 0
|
# ============================================================================
# FILE: size.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
from defx.base.column import Base, Highlights
from defx.context import Context
from defx.util import Nvim, readable, Candidate
import typing
class Column(Base):
def __init__(self, vim: Nvim) -> None:
super().__init__(vim)
self.name = 'size'
self.has_get_with_highlights = True
self._length = 9
def get_with_highlights(
self, context: Context, candidate: Candidate
) -> typing.Tuple[str, Highlights]:
path = candidate['action__path']
if not readable(path) or path
|
.is_dir():
return (' ' * self._length, [])
size = self._get_size(path.stat().st_size)
text = '{:>6s}{:>3s}'.format(size[0], size[1])
return (text, [(self.highlight_name, self.start, self._length)])
def _get_size(self, size: float) -> typing.Tuple[str, str]:
multiple = 1024
suffixes = ['KB', 'MB', 'GB', 'TB']
if size < multiple:
return (str(size), 'B')
for suff
|
ix in suffixes:
size /= multiple
if size < multiple:
return ('{:.1f}'.format(size), suffix)
return ('INF', '')
def length(self, context: Context) -> int:
return self._length
def highlight_commands(self) -> typing.List[str]:
commands: typing.List[str] = []
commands.append(
f'highlight default link {self.highlight_name} Constant')
return commands
|
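`Column._get_size` above converts a byte count into a (value, suffix) pair by repeatedly dividing by 1024. The same logic as a standalone function with a couple of worked values:

```python
def human_size(size):
    """Mirror of Column._get_size: bytes -> (value, unit) tuple."""
    multiple = 1024
    if size < multiple:
        return (str(size), 'B')
    for suffix in ['KB', 'MB', 'GB', 'TB']:
        size /= multiple
        if size < multiple:
            return ('{:.1f}'.format(size), suffix)
    return ('INF', '')

assert human_size(512) == ('512', 'B')
assert human_size(1536) == ('1.5', 'KB')
assert human_size(3 * 1024 ** 2) == ('3.0', 'MB')
```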
jdpepperman/houseSimulation
|
modules/House.py
|
Python
|
gpl-2.0
| 2,102
| 0.001903
|
import random
from Person import Person
class House(object):
def __init__(self):
self.rooms = []
self.actors = []
def __str__(self):
house_string = ""
for room in self.rooms:
house_string = house_string + str(room) + "\n\n"
return house_string[:-2]
def __iter__(self):
return iter(self.rooms)
def getDictionary(self):
return_dict = {}
for room in self.rooms:
return_dict[room.name] = room.getDictionary()
return return_dict
def getRooms(self):
return self.rooms
def placePersonInRoom(self, person):
for room in self.rooms:
if person in room.actors_in_room:
room.removeActor(person)
placed = False
while not placed:
i = random.randint(0, len(self.rooms) - 1)
if self.rooms[i].can_enter:
self.rooms[i].addActor(person)
placed = True
def addRooms(self, rooms):
for room in rooms:
if room not in self.rooms:
self.rooms.append(room)
def hasRoomType(self, roomType):
for room in self.rooms:
if isinstance(room, roomType):
return T
|
rue
return False
def tick(self):
for actor in self.actors:
actor.tick()
def toString_people(self):
string = "People in house\n[name,\t\tage,\thngr,\tbthrm,\tstatus]:\n"
for actor in self.actors:
if isinstance(actor, Person):
if len(a
|
ctor.name) < 6:
string = (string + "[" + actor.name + ",\t\t" + str(actor.age) + ",\t" +
str(actor.hunger) + ",\t" + str(actor.bathroom_need) + ",\t" +
actor.status + "]\n")
else:
string = (string + "[" + actor.name + ",\t" + str(actor.age) + ",\t" +
str(actor.hunger) + ",\t" + str(actor.bathroom_need) + ",\t" +
actor.status + "]\n")
return string
|
tensorflow/data-validation
|
tensorflow_data_validation/statistics/generators/sparse_feature_stats_generator.py
|
Python
|
apache-2.0
| 7,813
| 0.004736
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module that computes statistics used to validate sparse features.
Currently, this module generates the following statistics for each
sparse feature:
- missing_value: Number of examples missing the value_feature.
- missing_index: A RankHistogram from index_name to the number of examples
missing the corresponding index_feature.
- min_length_diff: A RankHistogram from index_name to the minimum of
len(index_feature) - len(value_feature).
- max_length_diff: A RankHistogram from index_name to the maximum of
len(index_feature) - len(value_feature).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Dict, Iterable, List, Text, Tuple, Union
from tensorflow_data_validation import types
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.statistics.generators.constituents import count_missing_generator
from tensorflow_data_validation.statistics.generators.constituents import length_diff_generator
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
# TODO(https://issues.apache.org/jira/browse/SPARK-22674): Switch to
# `collections.namedtuple` or `typing.NamedTuple` once the Spark issue is
# resolved.
from tfx_bsl.types import tfx_namedtuple # pylint: disable=g-bad-import-order
# LINT.IfChange(custom_stat_names)
_MAX_LENGTH_DIFF_NAME = 'max_length_diff'
_MIN_LENGTH_DIFF_NAME = 'min_length_diff'
_MISSING_INDEX_NAME = 'missing_index'
_MISSING_VALUE_NAME = 'missing_value'
# LINT.ThenChange(../../anomalies/schema.cc:sparse_feature_custom_stat_names)
# Named tuple containing the FeaturePaths for the value and index features
# that comprise a given sparse feature.
_SparseFeatureComponents = tfx_namedtuple.namedtuple(
'_SparseFeatureComponents', ['value_feature', 'index_features'])
def _get_all_sparse_features(
schema: schema_pb2.Schema
) -> List[Tuple[types.FeaturePath, schema_pb2.SparseFeature]]:
"""Returns all sparse features in a schema."""
def _recursion_helper(
parent_path: types.FeaturePath, container: Union[schema_pb2.Schema,
schema_pb2.StructDomain]
  ) -> List[Tuple[types.FeaturePath, schema_pb2.SparseFeature]]:
    """Helper function that is used in finding sparse features in a tree."""
result = []
for sf in container.sparse_feature:
# Sparse features do not have a struct_domain, so they cannot be parent
# features. Thus, once this reaches a sparse feature, add it to the
# result.
result.append((parent_path.child(sf.name), sf))
for f in container.feature:
if f.type == schema_pb2.STRUCT:
result.extend(
_recursion_helper(parent_path.child(f.name), f.struct_domain))
return result
return _recursion_helper(types.FeaturePath([]), schema)
def _get_components(
sparse_features: Iterable[Tuple[types.FeaturePath,
schema_pb2.SparseFeature]]
) -> Dict[types.FeaturePath, _SparseFeatureComponents]:
"""Returns the index and value feature paths that comprise sparse features."""
# A dict mapping sparse feature paths to their component index and value
# feature paths.
sparse_feature_components = dict()
# The index and value features for a given sparse feature have the same parent
# path as the sparse feature.
for path, feature in sparse_features:
parent_path = path.parent()
value_feature = parent_path.child(feature.value_feature.name)
index_features = set()
for index_feature in feature.index_feature:
index_features.add(parent_path.child(index_feature.name))
sparse_feature_components[path] = _SparseFeatureComponents(
value_feature, index_features)
return sparse_feature_components
class SparseFeatureStatsGenerator(stats_generator.CompositeStatsGenerator):
"""Generates statistics for sparse features."""
def __init__(self,
schema: schema_pb2.Schema,
name: Text = 'SparseFeatureStatsGenerator') -> None:
"""Initializes a sparse feature statistics generator.
Args:
schema: A required schema for the dataset.
name: An optional unique name associated with the statistics generator.
"""
self._sparse_feature_components = _get_components(
_get_all_sparse_features(schema))
# Create length diff generators for each index / value pair and count
# missing generator for all paths.
constituents = []
for _, (value, indices) in self._sparse_feature_components.items():
required_paths = [value] + list(indices)
constituents.append(
count_missing_generator.CountMissingGenerator(value, required_paths))
for index in indices:
constituents.append(
length_diff_generator.LengthDiffGenerator(index, value,
required_paths))
constituents.append(
count_missing_generator.CountMissingGenerator(
index, required_paths))
super(SparseFeatureStatsGenerator, self).__init__(name, constituents,
schema)
def extract_composite_output(self, accumulator):
stats = statistics_pb2.DatasetFeatureStatistics()
for feature_path, (value,
indices) in self._sparse_feature_components.items():
required_paths = [value] + list(indices)
feature_stats = stats.features.add(path=feature_path.to_proto())
feature_stats.custom_stats.add(
name=_MISSING_VALUE_NAME,
num=accumulator[count_missing_generator.CountMissingGenerator.key(
value, required_paths)])
index_features_num_missing_histogram = statistics_pb2.RankHistogram()
max_length_diff_histogram = statistics_pb2.RankHistogram()
min_length_diff_histogram = statistics_pb2.RankHistogram()
for index in sorted(indices):
index_label = index.steps()[-1]
missing_bucket = index_features_num_missing_histogram.buckets.add()
missing_bucket.label = index_label
missing_bucket.sample_count = accumulator[
count_missing_generator.CountMissingGenerator.key(
index, required_paths)]
min_diff, max_diff = accumulator[
length_diff_generator.LengthDiffGenerator.key(
index, value, required_paths)]
max_length_bucket = max_length_diff_histogram.buckets.add()
max_length_bucket.label = index_label
max_length_bucket.sample_count = max_diff
min_length_bucket = min_length_diff_histogram.buckets.add()
min_length_bucket.label = index_label
min_length_bucket.sample_count = min_diff
feature_stats.custom_stats.add(
name=_MISSING_INDEX_NAME,
rank_histogram=index_features_num_missing_histogram)
feature_stats.custom_stats.add(
name=_MAX_LENGTH_DIFF_NAME, rank_histogram=max_length_diff_histogram)
feature_stats.custom_stats.add(
name=_MIN_LENGTH_DIFF_NAME, rank_histogram=min_length_diff_histogram)
return stats
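# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): build a minimal
# schema with a single sparse feature and resolve its component paths with
# the module-level helpers above. The feature names are made up for
# illustration.
def _example_sparse_feature_components():
  schema = schema_pb2.Schema()
  sparse = schema.sparse_feature.add()
  sparse.name = 'terms'
  sparse.value_feature.name = 'terms_value'
  sparse.index_feature.add().name = 'terms_index'
  components = _get_components(_get_all_sparse_features(schema))
  # Maps FeaturePath(['terms']) to its value feature path (['terms_value'])
  # and the set containing its index feature path (['terms_index']).
  return components
# ---------------------------------------------------------------------------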
|
murphybytes/royalty
|
models/school.py
|
Python
|
mit
| 841
| 0.042806
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
class School(models.Model):
_name = 'royalty.school'
name = fields.Char('Name', size=255, required=True)
address_line1 = fields.Char( 'Address 1', size=255 )
address_line2 = fields.Char( 'Address 2', size=255 )
city = fields.Char( 'City', size=30 )
state = fields.Char( 'State', size=2 )
zip_code = fields.Char( 'ZipCode', size=10 )
abbreviation = fields.Char( 'Organization Abbreviation', size=75 )
active = fields.Boolean( 'Organization Active', default=True )
old_id = fields.Integer( 'Legacy ID' )
    products = fields.One2many( 'product.product', 'school_id', 'Products' )
    contacts = fields.One2many( 'royalty.contact', 'school_id', 'Contacts' )
|
ujjwal96/mitmproxy
|
test/mitmproxy/proxy/protocol/test_http1.py
|
Python
|
mit
| 3,325
| 0.000902
|
from unittest import mock
import pytest
from mitmproxy.test import tflow
from mitmproxy.net.http import http1
from mitmproxy.net.tcp import TCPClient
from mitmproxy.test.tutils import treq
from ... import tservers
class TestHTTPFlow:
def test_repr(self):
f = tflow.tflow(resp=True, err=True)
assert repr(f)
class TestInvalidRequests(tservers.HTTPProxyTest):
ssl = True
def test_double_connect(self):
p = self.pathoc()
with p.connect():
r = p.request("connect:'%s:%s'" % ("127.0.0.1", self.server2.port))
assert r.status_code == 400
assert b"Unexpected CONNECT" in r.content
def test_relative_request(self):
p = self.pathoc_raw()
with p.connect():
r = p.request("get:/p/200")
assert r.status_code == 400
assert b"Invalid HTTP request form" in r.content
class TestProxyMisconfiguration(tservers.TransparentProxyTest):
def test_absolute_request(self):
p = self.pathoc()
with p.connect():
r = p.request("get:'http://localhost:%d/p/200'" % self.server.port)
assert r.status_code == 400
assert b"misconfiguration" in r.content
class TestExpectHeader(tservers.HTTPProxyTest):
def test_simple(self):
client = TCPClient(("127.0.0.1", self.proxy.port))
client.connect()
# call pathod server, wait a second to complete the request
client.wfile.write(
b"POST http://localhost:%d/p/200 HTTP/1.1\r\n"
b"Expect: 100-continue\r\n"
b"Content-Length: 16\r\n"
b"\r\n" % self.server.port
)
client.wfile.flush()
assert client.rfile.readline() == b"HTTP/1.1 100 Continue\r\n"
        assert client.rfile.readline() == b"\r\n"
client.wfile.write(b"0123456789abcdef\r\n")
client.wfile.flush()
resp = http1.read_response(client.rfile, treq())
assert resp.status_code == 200
client.finish()
client.close()
class TestHeadContentLength(tservers.HTTPProxyTest):
def test_head_content_length(self):
p = self.pathoc()
with p.connect():
            resp = p.request(
                """head:'%s/p/200:h"Content-Length"="42"'""" % self.server.urlbase
)
assert resp.headers["Content-Length"] == "42"
class TestStreaming(tservers.HTTPProxyTest):
@pytest.mark.parametrize('streaming', [True, False])
def test_streaming(self, streaming):
class Stream:
def requestheaders(self, f):
f.request.stream = streaming
def responseheaders(self, f):
f.response.stream = streaming
def assert_write(self, v):
if streaming:
assert len(v) <= 4096
return self.o.write(v)
self.master.addons.add(Stream())
p = self.pathoc()
with p.connect():
with mock.patch("mitmproxy.net.tcp.Writer.write", side_effect=assert_write, autospec=True):
# response with 10000 bytes
r = p.request("post:'%s/p/200:b@10000'" % self.server.urlbase)
assert len(r.content) == 10000
# request with 10000 bytes
assert p.request("post:'%s/p/200':b@10000" % self.server.urlbase)
|
liyi193328/seq2seq
|
run_scripts/vocab.py
|
Python
|
apache-2.0
| 3,713
| 0.011869
|
#encoding=utf-8
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = "liyi"
__date__ = "2017-07-06"
import os
import sys
import argparse
import collections
import logging
import codecs
import charset
def generate_vocab(source_paths, save_path, delimiter=" ", max_vocab_size=150000, min_freq=10,
filter_en=True, filter_num=True, verb=True):
# Counter for all tokens in the vocabulary
vocab_cnt = collections.Counter()
for i, path in enumerate(source_paths):
f = codecs.open(path, "r", "utf-8")
while True:
line = f.readline()
if not line:
break
if delimiter == "":
tokens = list(line.strip())
else:
tokens = line.strip().split(delimiter)
tokens = [_ for _ in tokens if len(_) > 0]
vocab_cnt.update(tokens)
##filter vocab
if filter_en is True or filter_num is True:
new_vocab_cnt = collections.Counter()
for word in vocab_cnt:
skip = False
for index, char in enumerate(word):
if filter_en and charset.is_alphabet(char):
skip = True
elif filter_num and charset.is_number(char):
skip = True
elif charset.is_chinese_punctuation(char): ##solve 。榜样
if len(word) > 1:
print("{} is not right".format(word))
                skip = True
if skip is True:
break
if skip is False:
new_vocab_cnt[word] = vocab_cnt[word]
vocab_cnt = new_vocab_cnt
logging.info("Found %d unique tokens in the vocabulary.", len(vocab_cnt))
# Filter tokens below the frequency threshold
if min_freq > 0:
        filtered_tokens = [(w, c) for w, c in vocab_cnt.most_common()
if c > min_freq]
cnt = collections.Counter(dict(filtered_tokens))
logging.info("Found %d unique tokens with frequency > %d.",
len(vocab_cnt), min_freq)
# Sort tokens by 1. frequency 2. lexically to break ties
word_with_counts = vocab_cnt.most_common()
word_with_counts = sorted(
word_with_counts, key=lambda x: (x[1], x[0]), reverse=True)
# Take only max-vocab
if max_vocab_size is not None:
word_with_counts = word_with_counts[:max_vocab_size]
if save_path is not None:
save_path = os.path.abspath(save_path)
if os.path.exists(os.path.dirname(save_path)) == False:
os.makedirs(os.path.dirname(save_path))
with codecs.open(save_path, "w", "utf-8") as f:
for word, count in word_with_counts:
# print("{}\t{}".format(word, count))
f.write("{}\t{}\n".format(word, count))
print("generate vocab path {}".format(save_path))
return word_with_counts
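# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): a minimal call that
# skips the charset-based filtering and the frequency cut-off, returning the
# sorted (token, count) pairs without writing a vocabulary file. The corpus
# path argument is hypothetical.
def _example_generate_vocab(corpus_path):
    return generate_vocab([corpus_path], save_path=None, delimiter=" ",
                          min_freq=0, filter_en=False, filter_num=False)
# ---------------------------------------------------------------------------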
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate vocabulary for a tokenized text file.")
parser.add_argument(
"--min_frequency",
dest="min_frequency",
type=int,
default=0,
help="Minimum frequency of a word to be included in the vocabulary.")
parser.add_argument(
"--max_vocab_size",
dest="max_vocab_size",
type=int,
help="Maximum number of tokens in the vocabulary")
parser.add_argument(
"--downcase",
dest="downcase",
type=bool,
help="If set to true, downcase all text before processing.",
default=False)
parser.add_argument(
"infile",
nargs="+",
type=str,
help="Input tokenized text file to be processed.")
parser.add_argument(
"--delimiter",
dest="delimiter",
type=str,
default=" ",
help="Delimiter character for tokenizing. Use \" \" and \"\" for word and char level respectively."
)
args = parser.parse_args()
|
Pontianak/Python-For-Informatics-Assignments
|
2.2.py
|
Python
|
mit
| 58
| 0.017241
|
name = raw_input("Enter your name:")
print "Hello" , name
|
tomjelinek/pcs
|
pcs/cli/common/parse_args.py
|
Python
|
gpl-2.0
| 21,092
| 0.000379
|
from typing import (
Iterable,
Mapping,
Union,
)
from pcs.cli.common.errors import (
SEE_MAN_CHANGES,
CmdLineInputError,
)
from pcs.common.str_tools import (
format_list,
format_plural,
)
from pcs.common.tools import timeout_to_seconds
ModifierValueType = Union[None, bool, str]
ARG_TYPE_DELIMITER = "%"
# h = help, f = file,
# p = password (cluster auth), u = user (cluster auth),
PCS_SHORT_OPTIONS = "hf:p:u:"
PCS_LONG_OPTIONS = [
"debug",
"version",
"help",
"fullhelp",
"force",
"skip-offline",
"autodelete",
"simulate",
"all",
"full",
"local",
"wait",
"config",
"start",
"enable",
"disabled",
"off",
"request-timeout=",
"brief",
# resource (safe-)disable
"safe",
"no-strict",
# resource cleanup | refresh
"strict",
"pacemaker",
"corosync",
"no-default-ops",
"defaults",
"nodesc",
"master", # deprecated, replaced by --promoted
"promoted",
"name=",
"group=",
"node=",
"from=",
"to=",
"after=",
"before=",
"corosync_conf=",
"booth-conf=",
"booth-key=",
"no-watchdog-validation",
"no-keys-sync",
# in pcs status - do not display resource status on inactive node
"hide-inactive",
# pcs resource (un)manage - enable or disable monitor operations
"monitor",
# TODO remove
# used only in deprecated 'pcs resource|stonith show'
"groups",
# "pcs resource clear --expired" - only clear expired moves and bans
"expired",
# disable evaluating whether rules are expired
"no-expire-check",
# allow overwriting existing files, currently meant for / used in CLI only
"overwrite",
# output format of commands, e.g: json, cmd, text, ...
"output-format=",
# auth token
"token=",
]
def split_list(arg_list, separator):
"""return list of list of arg_list using separator as delimiter"""
separator_indexes = [i for i, x in enumerate(arg_list) if x == separator]
bounds = zip(
[0] + [i + 1 for i in separator_indexes], separator_indexes + [None]
)
return [arg_list[i:j] for i, j in bounds]
def split_list_by_any_keywords(arg_list, keyword_label):
"""
Return a list of lists of args using any arg not containing = as a delimiter
iterable arg_list -- (part of) argv
string keyword_label -- description of all keywords
"""
if "=" in arg_list[0]:
raise CmdLineInputError(
"Invalid character '=' in {} '{}'".format(
keyword_label,
arg_list[0],
)
)
current_keyword = None
groups = {}
for arg in arg_list:
if "=" in arg:
groups[current_keyword].append(arg)
else:
current_keyword = arg
if current_keyword in groups:
raise CmdLineInputError(
"{} '{}' defined multiple times".format(
keyword_label.capitalize(), current_keyword
)
)
groups[current_keyword] = []
return groups
def split_option(arg, allow_empty_value=True):
"""
Get (key, value) from a key=value commandline argument.
Split the argument by the first = and return resulting parts. Raise
CmdLineInputError if the argument cannot be splitted.
string arg -- commandline argument
allow_empty_value -- if True, empty value is allowed. Otherwise,
CmdLineInputError exception is raised
Commandline options: no options
"""
if "=" not in arg:
raise CmdLineInputError("missing value of '{0}' option".format(arg))
if arg.startswith("="):
raise CmdLineInputError("missing key in '{0}' option".format(arg))
key, value = arg.split("=", 1)
if not (value or allow_empty_value):
raise CmdLineInputError("value of '{0}' option is empty".format(key))
return key, value
def prepare_options(cmdline_args, allowed_repeatable_options=()):
"""
Get a dict of options from cmdline key=value args
iterable cmdline_args -- command line arguments
iterable allowed_repeatable_options -- options that can be set several times
Commandline options: no options
"""
options = {}
for arg in cmdline_args:
name, value = split_option(arg)
if name not in options:
if name in allowed_repeatable_options:
options[name] = [value]
else:
options[name] = value
elif name in allowed_repeatable_options:
options[name].append(value)
elif options[name] != value:
raise CmdLineInputError(
(
"duplicate option '{0}' with different values '{1}' and "
"'{2}'"
).format(name, options[name], value)
)
return options
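# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how key=value
# arguments become a dict, including a repeatable option and splitting only
# on the first '='. The option names are made up for illustration.
def _example_prepare_options():
    single = prepare_options(["wait=10", "name=web"])
    # single == {"wait": "10", "name": "web"}
    repeated = prepare_options(
        ["meta=a=1", "meta=b=2"], allowed_repeatable_options=("meta",)
    )
    # repeated == {"meta": ["a=1", "b=2"]}
    return single, repeated
# ---------------------------------------------------------------------------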
def prepare_options_allowed(
cmdline_args, allowed_options, allowed_repeatable_options=()
):
"""
Get a dict of options from cmdline key=value args, raise on unallowed key
iterable cmdline_args -- command line arguments
iterable allowed_options -- list of allowed options
iterable allowed_repeatable_options -- options that can be set several times
Commandline options: no options
"""
parsed_options = prepare_options(
cmdline_args, allowed_repeatable_options=allowed_repeatable_options
)
unknown_options = frozenset(parsed_options.keys()) - frozenset(
allowed_options
)
if unknown_options:
raise CmdLineInputError(
"Unknown option{s} '{options}'".format(
s=("s" if len(unknown_options) > 1 else ""),
options="', '".join(sorted(unknown_options)),
)
)
return parsed_options
def group_by_keywords(
arg_list,
keyword_set,
implicit_first_group_key=None,
keyword_repeat_allowed=True,
group_repeated_keywords=None,
only_found_keywords=False,
):
"""
Return dictionary with keywords as keys and following arguments as value.
For example when keywords are "first" and "seconds" then for arg_list
["first", 1, 2, "second", 3] it returns {"first": [1, 2], "second": [3]}
list arg_list is commandline arguments containing keywords
set keyword_set contain all expected keywords
string implicit_first_group_key is the key for capturing of arguments before
the occurrence of the first keyword. implicit_first_group_key is not
        a keyword => its occurrence in args is considered an ordinary argument.
bool keyword_repeat_allowed is the flag to turn on/off checking the
uniqueness of each keyword in arg_list.
    list group_repeated_keywords contains keywords for which each occurrence is
packed separately. For example when keywords are "first" and "seconds"
and group_repeated_keywords is ["first"] then for arg_list
["first", 1, 2, "second", 3, "first", 4] it returns
{"first": [[1, 2], [4]], "second": [3]}.
        Repeating is allowed for these keywords.
    bool only_found_keywords is a flag deciding whether keywords that never
        appear in arg_list are included in the result.
"""
def get_keywords_for_grouping():
if not group_repeated_keywords:
return []
# implicit_first_group_key is not keyword: when it is in
# group_repeated_keywords but not in keyword_set is considered as
# unknown.
unknown_keywords = set(group_repeated_keywords) - set(keyword_set)
if unknown_keywords:
# to avoid developer mistake
raise AssertionError(
"Keywords in grouping not in keyword set: {0}".format(
", ".join(unknown_keywords)
)
)
return group_repeated_keywords
def get_completed_groups():
completed_groups = groups.copy()
if not only_found_keywords:
for keyword in keyword_set:
if keyword not in completed_groups:
completed_groups[keyword] = []
if (
implicit_first_group_key
|
Zing22/Moogle
|
moogle/mooc/apps.py
|
Python
|
mit
| 83
| 0
|
from django.apps import AppConfig
class MoocConfig(AppConfig):
name = 'mooc'
|
escorciav/amcs211
|
hw3/hw3_2a.py
|
Python
|
bsd-2-clause
| 502
| 0.007968
|
import sympy
x1, x2 = sympy.symbols('x1 x2')
f = 100*(x2 - x1**2)**2 + (1-x1)**2
df_dx1 = sympy.diff(f,x1)
df_dx2 = sympy.diff(f,x2)
H = sympy.hessian(f, (x1, x2))
xs = sympy.solve([df_dx1, df_dx2], [x1, x2])
H_xs = H.subs([(x1,xs[0][0]), (x2,xs[0][1])])
lambda_xs = H_xs.eigenvals()
count = 0
for i in lambda_xs.keys():
    if i.evalf() <= 0:
count += 1
if count == 0:
    print 'Local minima'
elif count == len(lambda_xs.keys()):
    print 'Local maxima'
else:
print 'Saddle point'
|
supriti/DeepSea
|
srv/modules/runners/net.py
|
Python
|
gpl-3.0
| 8,475
| 0.009558
|
# -*- coding: utf-8 -*-
import logging
import re
import salt.client
from netaddr import IPNetwork, IPAddress
log = logging.getLogger(__name__)
def ping(cluster = None, exclude = None, **kwargs):
"""
Ping all addresses from all addresses on all minions. If cluster is passed,
restrict addresses to public and cluster networks.
Note: Some optimizations could be done here in the multi module (such as
skipping the source and destination when they are the same). However, the
unoptimized version is taking ~2.5 seconds on 18 minions with 72 addresses
for success. Failures take between 6 to 12 seconds. Optimizations should
focus there.
TODO: Convert commented out print statements to log.debug
CLI Example: (Before DeepSea with a cluster configuration)
.. code-block:: bash
sudo salt-run net.ping
or you can run it with exclude
.. code-block:: bash
sudo salt-run net.ping exclude="E@host*,host-osd-name*,192.168.1.1"
(After DeepSea with a cluster configuration)
.. code-block:: bash
sudo salt-run net.ping cluster=ceph
sudo salt-run net.ping ceph
"""
exclude_string = exclude_iplist = None
if exclude:
exclude_string, exclude_iplist = _exclude_filter(exclude)
extra_kwargs = _skip_dunder(kwargs)
if _skip_dunder(kwargs):
print "Unsupported parameters: {}".format(" ,".join(extra_kwargs.keys()))
text = re.sub(re.compile("^ {12}", re.MULTILINE), "", '''
salt-run net.ping [cluster] [exclude]
Ping all addresses from all addresses on all minions.
If cluster is specified, restrict addresses to cluster and public networks.
If exclude is specified, remove matching addresses. See Salt compound matchers.
    Within exclude, an individual ip address removes that address from the
    ping targets (the ping-to interface) rather than from the ping sources.
Examples:
salt-run net.ping
salt-run net.ping ceph
salt-run net.ping ceph L@mon1.ceph
salt-run net.ping cluster=ceph exclude=L@mon1.ceph
salt-run net.ping exclude=S@192.168.21.254
salt-run net.ping exclude=S@192.168.21.0/29
salt-run net.ping exclude="E@host*,host-osd-name*,192.168.1.1"
''')
print text
return
local = salt.client.LocalClient()
if cluster:
search = "I@cluster:{}".format(cluster)
if exclude_string:
search += " and not ( " + exclude_string + " )"
log.debug( "ping: search {} ".format(search))
networks = local.cmd(search , 'pillar.item', [ 'cluster_network', 'public_network' ], expr_form="compound")
#print networks
total = local.cmd(search , 'grains.get', [ 'ipv4' ], expr_form="compound")
#print addresses
addresses = []
for host in sorted(total.iterkeys()):
if 'cluster_network' in networks[host]:
addresses.extend(_address(total[host], networks[host]['cluster_network']))
if 'public_network' in networks[host]:
addresses.extend(_address(total[host], networks[host]['public_network']))
else:
search = "*"
if exclude_string:
search += " and not ( " + exclude_string + " )"
log.debug( "ping: search {} ".format(search))
addresses = local.cmd(search , 'grains.get', [ 'ipv4' ], expr_form="compound")
addresses = _flatten(addresses.values())
# Lazy loopback removal - use ipaddress when adding IPv6
try:
if addresses:
addresses.remove('127.0.0.1')
if exclude_iplist:
            for ex_ip in exclude_iplist:
log.debug( "ping: removing {} ip ".format(ex_ip))
addresses.remove(ex_ip)
except ValueError:
log.debug( "ping: remove {} ip doesn't exist".format(ex_ip))
pass
#print addresses
results = local.cmd(search, 'multi.ping', addresses, expr_form="compound")
#print results
_summarize(len(addresses), results)
def _address(addresses, network):
"""
Return all addresses in the given network
Note: list comprehension vs. netaddr vs. simple
"""
matched = []
for address in addresses:
if IPAddress(address) in IPNetwork(network):
matched.append(address)
return matched
def _exclude_filter(excluded):
"""
Internal exclude_filter return string in compound format
Compound format = {'G': 'grain', 'P': 'grain_pcre', 'I': 'pillar',
'J': 'pillar_pcre', 'L': 'list', 'N': None,
'S': 'ipcidr', 'E': 'pcre'}
IPV4 address = "255.255.255.255"
hostname = "myhostname"
"""
log.debug( "_exclude_filter: excluding {}".format(excluded))
excluded = excluded.split(",")
log.debug( "_exclude_filter: split ',' {}".format(excluded))
pattern_compound = re.compile("^.*([GPIJLNSE]\@).*$")
pattern_iplist = re.compile( "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$" )
pattern_ipcidr = re.compile( "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2][0-9]|3[0-2]))$")
pattern_hostlist = re.compile( "^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9-]*[a-zA-Z0-9]).)*([A-Za-z]|[A-Za-z][A-Za-z0-9-]*[A-Za-z0-9])$")
compound = []
ipcidr = []
iplist = []
hostlist = []
regex_list = []
for para in excluded:
if pattern_compound.match(para):
log.debug( "_exclude_filter: Compound {}".format(para))
compound.append(para)
elif pattern_iplist.match(para):
log.debug( "_exclude_filter: ip {}".format(para))
iplist.append(para)
elif pattern_ipcidr.match(para):
log.debug( "_exclude_filter: ipcidr {}".format(para))
ipcidr.append("S@"+para)
elif pattern_hostlist.match(para):
hostlist.append("L@"+para)
log.debug( "_exclude_filter: hostname {}".format(para))
else:
regex_list.append("E@"+para)
log.debug( "_exclude_filter: not sure but likely Regex host {}".format(para))
#if ipcidr:
# log.debug("_exclude_filter ip subnet is not working yet ... = {}".format(ipcidr))
new_compound_excluded = " or ".join(compound + hostlist + regex_list + ipcidr)
log.debug("_exclude_filter new formed compound excluded list = {}".format(new_compound_excluded))
if new_compound_excluded and iplist:
return new_compound_excluded, iplist
elif new_compound_excluded:
return new_compound_excluded, None
elif iplist:
return None, iplist
else:
return None, None
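# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original runner): the exclude string
# is split on ',' and each part is classified. Compound matchers, hostnames
# and regexes end up in the compound string, plain IPv4 addresses in the ip
# list. The values below are made up for illustration.
def _example_exclude_filter():
    compound, iplist = _exclude_filter("E@data*,192.168.21.5,mon1.ceph")
    # compound == "E@data* or L@mon1.ceph", iplist == ["192.168.21.5"]
    return compound, iplist
# ---------------------------------------------------------------------------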
def _flatten(l):
"""
Flatten a array of arrays
"""
log.debug( "_flatten: {}".format(l))
return list(set(item for sublist in l for item in sublist))
def _summarize(total, results):
"""
Summarize the successes, failures and errors across all minions
"""
success = []
failed = []
errored = []
slow = []
log.debug( "_summarize: results {}".format(results))
for host in sorted(results.iterkeys()):
if results[host]['succeeded'] == total:
success.append(host)
if 'failed' in results[host]:
failed.append("{} from {}".format(results[host]['failed'], host))
if 'errored' in results[host]:
errored.append("{} from {}".format(results[host]['errored'], host))
if 'slow' in results[host]:
slow.append("{} from {} average rtt {}".format(results[host]['slow'], host, "{0:.2f}".format(results[host]['avg'])))
if success:
avg = sum( results[host].get('avg') for host in results) / len(results)
else:
avg = 0
print "Succeeded: {} addresses from {} minions average rtt {} ms".format(total, len(success), "{0:.2f}".format(avg))
if slow:
print "Warning: \n {}".format("\n ".join(slow))
if failed:
|
patrickleweryharris/anagram-solver
|
anagram_solver/anagram_solver.py
|
Python
|
mit
| 1,532
| 0
|
#!/usr/local/bin/python3.5
import itertools
import sys
from .stuff import word_set
__version__ = "1.1.0"
def find_possible(lst):
"""
Return all possible combinations of letters in lst
@type lst: [str]
@rtype: [str]
"""
returned_list = []
for i in range(0, len(lst) + 1):
for subset in itertools.permutations(lst, i):
possible = ''
for letter in subset:
possible += letter
if len(possible) == len(lst):
# itertools.permutations returns smaller lists
                returned_list.append(possible)
return returned_list
def return_words(lst, word_set):
"""
    Return combinations in lst that are words in word_set
@type lst: [str]
@type word_set: set(str)
@rtype: [str]
"""
returned_list = []
for word in lst:
if word in word_set or word.capitalize() in word_set:
# Some words are capitalized in the word_set
returned_list.append(word)
return returned_list
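# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): solving a tiny
# anagram against a made-up word set instead of the bundled one.
def _example_solve():
    candidates = find_possible(['t', 'a', 'c'])
    words = return_words(candidates, {'cat', 'act'})
    # words == ['act', 'cat'] (in permutation order)
    return words
# ---------------------------------------------------------------------------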
def main():
"""
Main function to run the program
"""
anagram_lst = []
anagram = sys.argv[1]
for char in anagram:
anagram_lst.append(char)
possible_words = find_possible(anagram_lst)
actual_words = return_words(possible_words, word_set)
print('Solutions:')
if len(actual_words) == 0:
print('None found')
else:
for item in set(actual_words):
# Running through in set form prevents duplicates
print(item)
|
nok/sklearn-porter
|
examples/estimator/classifier/RandomForestClassifier/js/basics_embedded.pct.py
|
Python
|
mit
| 1,213
| 0.004122
|
# %% [markdown]
# # sklearn-porter
#
# Repository: [https://github.com/nok/sklearn-porter](https://github.com/nok/sklearn-porter)
#
# ## RandomForestClassifier
#
# Documentation: [sklearn.ensemble.RandomForestClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html)
# %%
import sys
sys.path.append('../../../../..')
# %% [markdown]
# ### Load data
# %%
from sklearn.datasets import load_iris
iris_data = load_iris()
X = iris_data.data
y = iris_data.target
print(X.shape, y.shape)
# %% [markdown]
# ### Train classifier
# %%
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=15, max_depth=None,
min_samples_split=2, random_state=0)
clf.fit(X, y)
# %% [markdown]
# ### Transpile classifier
# %%
from sklearn_porter import Porter
porter = Porter(clf, language='js')
output = porter.export(embed_data=True)
print(output)
# %% [markdown]
# ### Run classification in JavaScript
# %%
# Save classifier:
# with open('RandomForestClassifier.js', 'w') as f:
# f.write(output)
# Run classification:
# if hash node 2/dev/null; then
# node RandomForestClassifier.js 1 2 3 4
# fi
|
evernym/plenum
|
plenum/bls/bls_bft_replica_plenum.py
|
Python
|
apache-2.0
| 16,403
| 0.002561
|
from typing import Optional
from common.serializers.serialization import state_roots_serializer
from crypto.bls.bls_bft import BlsBft
from crypto.bls.bls_bft_replica import BlsBftReplica
from crypto.bls.bls_multi_signature import MultiSignature, MultiSignatureValue
from crypto.bls.indy_crypto.bls_crypto_indy_crypto import IndyCryptoBlsUtils
from plenum.common.constants import BLS_PREFIX, AUDIT_LEDGER_ID, TXN_PAYLOAD, \
TXN_PAYLOAD_DATA, AUDIT_TXN_LEDGER_ROOT, AUDIT_TXN_STATE_ROOT, AUDIT_TXN_PP_SEQ_NO
from plenum.common.messages.node_messages import PrePrepare, Prepare, Commit
from plenum.common.metrics_collector import MetricsCollector, NullMetricsCollector, measure_time, MetricsName
from plenum.common.types import f
from plenum.common.util import compare_3PC_keys
from plenum.server.consensus.utils import replica_name_to_node_name
from plenum.server.database_manager import DatabaseManager
from stp_core.common.log import getlogger
logger = getlogger()
class BlsBftReplicaPlenum(BlsBftReplica):
def __init__(self,
node_id,
bls_bft: BlsBft,
is_master,
database_manager: DatabaseManager,
metrics: MetricsCollector = NullMetricsCollector()):
super().__init__(bls_bft, is_master)
self._all_bls_latest_multi_sigs = None
self.node_id = node_id
self._database_manager = database_manager
self._all_signatures = {}
self.state_root_serializer = state_roots_serializer
self.metrics = metrics
def _can_process_ledger(self, ledger_id):
# enable BLS for all ledgers
return True
# ----VALIDATE----
@measure_time(MetricsName.BLS_VALIDATE_PREPREPARE_TIME)
def validate_pre_prepare(self, pre_prepare: PrePrepare, sender):
if f.BLS_MULTI_SIGS.nm in pre_prepare and pre_prepare.blsMultiSigs:
multi_sigs = pre_prepare.blsMultiSigs
for sig in multi_sigs:
multi_sig = MultiSignature.from_list(*sig)
if not self._validate_multi_sig(multi_sig):
return BlsBftReplica.PPR_BLS_MULTISIG_WRONG
def validate_prepare(self, prepare: Prepare, sender):
pass
@measure_time(MetricsName.BLS_VALIDATE_COMMIT_TIME)
def validate_commit(self, commit: Commit, sender, pre_prepare: PrePrepare):
if f.BLS_SIGS.nm not in commit:
return
audit_txn = self._get_correct_audit_transaction(pre_prepare)
if not audit_txn:
return
audit_payload = audit_txn[TXN_PAYLOAD]
|
[TXN_PAYLOAD_DATA]
|
for lid, sig in commit.blsSigs.items():
lid = int(lid)
if lid not in audit_payload[AUDIT_TXN_STATE_ROOT] or lid not in audit_payload[AUDIT_TXN_LEDGER_ROOT]:
return BlsBftReplicaPlenum.CM_BLS_SIG_WRONG
if not self._validate_signature(sender, sig,
BlsBftReplicaPlenum._create_fake_pre_prepare_for_multi_sig(
lid,
audit_payload[AUDIT_TXN_STATE_ROOT][lid],
audit_payload[AUDIT_TXN_LEDGER_ROOT][lid],
pre_prepare
)):
return BlsBftReplicaPlenum.CM_BLS_SIG_WRONG
# ----CREATE/UPDATE----
@measure_time(MetricsName.BLS_UPDATE_PREPREPARE_TIME)
def update_pre_prepare(self, pre_prepare_params, ledger_id):
if not self._can_process_ledger(ledger_id):
return pre_prepare_params
if self._all_bls_latest_multi_sigs is not None:
# update BLS_MULTI_SIGS only (not BLS_MULTI_SIG)
# Pass None for backward compatibility
pre_prepare_params.append(None)
pre_prepare_params.append([val.as_list() for val in self._all_bls_latest_multi_sigs])
self._all_bls_latest_multi_sigs = None
return pre_prepare_params
def update_prepare(self, prepare_params, ledger_id):
# Send BLS signature in COMMITs only
return prepare_params
@measure_time(MetricsName.BLS_UPDATE_COMMIT_TIME)
def update_commit(self, commit_params, pre_prepare: PrePrepare):
ledger_id = pre_prepare.ledgerId
state_root_hash = pre_prepare.stateRootHash
if not self._can_process_ledger(ledger_id):
return commit_params
if not self._bls_bft.can_sign_bls():
logger.debug("{}{} can not sign COMMIT {} for state {}: No BLS keys"
.format(BLS_PREFIX, self, commit_params, state_root_hash))
return commit_params
# update BLS_SIGS only (not BLS_SIG)
# Use ' ' as BLS_SIG for backward-compatibility as BLS_SIG in COMMIT is optional but not Nullable
commit_params.append(' ')
last_audit_txn = self._get_correct_audit_transaction(pre_prepare)
if last_audit_txn:
res = {}
payload_data = last_audit_txn[TXN_PAYLOAD][TXN_PAYLOAD_DATA]
for ledger_id in payload_data[AUDIT_TXN_STATE_ROOT].keys():
fake_pp = BlsBftReplicaPlenum._create_fake_pre_prepare_for_multi_sig(
ledger_id,
payload_data[AUDIT_TXN_STATE_ROOT].get(ledger_id),
payload_data[AUDIT_TXN_LEDGER_ROOT].get(ledger_id),
pre_prepare
)
bls_signature = self._sign_state(fake_pp)
logger.debug("{}{} signed COMMIT {} for state {} with sig {}"
.format(BLS_PREFIX, self, commit_params, state_root_hash, bls_signature))
res[str(ledger_id)] = bls_signature
commit_params.append(res)
return commit_params
# ----PROCESS----
def process_pre_prepare(self, pre_prepare: PrePrepare, sender):
# does not matter which ledger id is current PPR for
# mult-sig is for domain ledger anyway
self._save_multi_sig_shared(pre_prepare)
def process_prepare(self, prepare: Prepare, sender):
pass
def process_commit(self, commit: Commit, sender):
key_3PC = (commit.viewNo, commit.ppSeqNo)
if f.BLS_SIGS.nm in commit and commit.blsSigs is not None:
if key_3PC not in self._all_signatures:
self._all_signatures[key_3PC] = {}
for ledger_id in commit.blsSigs.keys():
if ledger_id not in self._all_signatures[key_3PC]:
self._all_signatures[key_3PC][ledger_id] = {}
self._all_signatures[key_3PC][ledger_id][self.get_node_name(sender)] = commit.blsSigs[ledger_id]
def process_order(self, key, quorums, pre_prepare):
if not self._can_process_ledger(pre_prepare.ledgerId):
return
if not self._can_calculate_multi_sig(key, quorums):
return
# calculate signature always to keep master and non-master in sync
# but save on master only
all_bls_multi_sigs = self._calculate_all_multi_sigs(key, pre_prepare)
if not self._is_master:
return
if all_bls_multi_sigs:
for bls_multi_sig in all_bls_multi_sigs:
self._save_multi_sig_local(bls_multi_sig)
self._all_bls_latest_multi_sigs = all_bls_multi_sigs
# ----GC----
def gc(self, key_3PC):
keys_to_remove = []
for key in self._all_signatures.keys():
if compare_3PC_keys(key, key_3PC) >= 0:
keys_to_remove.append(key)
for key in keys_to_remove:
self._all_signatures.pop(key, None)
# ----MULT_SIG----
def _create_multi_sig_value_for_pre_prepare(self, pre_prepare: PrePrepare, pool_state_root_hash):
multi_sig_value = MultiSignatureValue(ledger_id=pre_prepare.ledgerId,
state_root_hash=pre_prepare.stateRootHash,
pool_state_root_hash=pool_state_root_hash,
txn_root_hash=pre_prepare.txnRootHash,
|
Avinash-Raj/appengine-django-skeleton
|
todo/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 599
| 0.001669
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-28 14:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('s_no', models.IntegerField()),
('name', models.CharField(max_length=200)),
],
),
]
|
helixyte/TheLMA
|
thelma/repositories/rdb/schema/tables/moleculedesignlibrary.py
|
Python
|
mit
| 1,817
| 0.002201
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Molecule design set library table.
"""
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Table
__docformat__ = "reStructuredText en"
__all__ = ['create_table']
def create_table(metadata, rack_layout_tbl, molecule_design_pool_set_tbl):
"Table factory."
tbl = Table('molecule_design_library', metadata,
Column('molecule_design_library_id', Integer, primary_key=True),
Column('molecule_design_pool_set_id', Integer,
               ForeignKey(molecule_design_pool_set_tbl.c.\
                          molecule_design_pool_set_id,
ondelete='CASCADE', onupdate='CASCADE'),
nullable=False),
Column('label', String, nullable=False, unique=True),
Column('final_volume', Float,
CheckConstraint('final_volume > 0'),
nullable=False),
Column('final_concentration', Float,
CheckConstraint('final_concentration > 0'),
nullable=False),
Column('number_layouts', Integer,
CheckConstraint('number_layouts > 0'),
nullable=False),
Column('rack_layout_id', Integer,
ForeignKey(rack_layout_tbl.c.rack_layout_id,
onupdate='CASCADE', ondelete='CASCADE'),
nullable=False)
)
return tbl
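# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): minimal stand-in
# tables carrying only the primary keys the factory references, to show how
# it is wired up. The stand-in table definitions are hypothetical.
def _example_create_table():
    from sqlalchemy import MetaData
    metadata = MetaData()
    rack_layout_tbl = Table(
        'rack_layout', metadata,
        Column('rack_layout_id', Integer, primary_key=True))
    pool_set_tbl = Table(
        'molecule_design_pool_set', metadata,
        Column('molecule_design_pool_set_id', Integer, primary_key=True))
    return create_table(metadata, rack_layout_tbl, pool_set_tbl)
# ---------------------------------------------------------------------------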
|
maxim-borisyak/craynn
|
craynn/updates/__init__.py
|
Python
|
mit
| 121
| 0.008264
|
from .careful import *
from .noisy import *
from . import streams
from . import resetable
from lasagne.updates import *
|
motlib/mqtt-ts
|
src/pub/evtpub.py
|
Python
|
gpl-3.0
| 1,817
| 0.003302
|
'''Publish sensor events to MQTT broker.'''
import logging
import paho.mqtt.publish as mqtt_pub
import paho.mqtt.client as mqtt
import socket
class MqttPublisher():
'''Publish sensor events to an MQTT broker.'''
def __init__(self, broker, topic_prefix='/sensors'):
'''Initialize a MqttPublisher instance.'''
self.broker = broker
# TODO: Choose between hostname and fqdn
self.node_name = socket.gethostname()
self.topic_prefix = topic_prefix
def get_topic(self, evt):
'''Generate the MQTT topic for the event.'''
data = {
'prefix': self.topic_prefix,
'node': self.node_name,
'sensor': evt.getSensorName(),
'quantity': evt.getQuantity(),
}
path_tmpl = '{prefix}/{node}/{sensor}/{quantity}'
return path_tmpl.format(**data)
def publish_event(self, evt):
'''Publish a single sensor event.'''
# The publish might fail, e.g. due to network problems. Just log
# the exception and try again next time.
try:
topic = self.get_topic(evt)
msg = "Publishing to topic '{0}'."
logging.debug(msg.format(topic))
# This fixes the protocol version to MQTT v3.1, because
# the current version of the MQTT broker available in
# raspbian does not support MQTT v3.1.1.
mqtt_pub.single(
topic=topic,
payload=evt.toJSON(),
                hostname=self.broker,
protocol=mqtt.MQTTv31)
except:
logging.exception('Publish of MQTT value failed.')
def publish_events(self, evts):
'''Publish a list of sensor events.'''
for evt in evts:
self.publish_event(evt)
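# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a stub event with
# the three methods the publisher relies on, showing the resulting topic
# layout '<prefix>/<node>/<sensor>/<quantity>'. The class and values are
# hypothetical.
class _StubEvent(object):
    def getSensorName(self):
        return 'bme280'
    def getQuantity(self):
        return 'temperature'
    def toJSON(self):
        return '{"value": 21.5}'
# MqttPublisher('broker.local').get_topic(_StubEvent()) returns something
# like '/sensors/<hostname>/bme280/temperature'.
# ---------------------------------------------------------------------------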
|
shiroyuki/passerine
|
test/ft/db/test_uow_association_one_to_many.py
|
Python
|
mit
| 4,063
| 0.001969
|
from ft.db.dbtestcase import DbTestCase
from passerine.db.session import Session
from passerine.db.common import ProxyObject
from passerine.db.uow import Record
from passerine.db.entity import entity
from passerine.db.mapper import link, CascadingType, AssociationType
@entity('test_db_uow_ass_one_to_many_computer')
class Computer(object):
def __init__(self, name):
self.name = name
@link('computer', Computer, association=AssociationType.ONE_TO_ONE)
@link('delegates', association=AssociationType.ONE_TO_MANY, cascading=[CascadingType.PERSIST, CascadingType.DELETE])
@entity('test_db_uow_ass_one_to_many_developer')
class Developer(object):
def __init__(self, name, computer=None, delegates=[]):
self.name = name
self.computer = computer
self.delegates = delegates
class TestDbUowAssociationOneToMany(DbTestCase):
def setUp(self):
self._setUp()
self._reset_db(self.__data_provider())
def test_fetching(self):
c = self.session.collection(Developer)
boss = c.filter_one({'name': 'boss'})
self.assertIsInstance(boss.delegates, list)
self.assertIsInstance(boss.delegates[0], ProxyObject)
self.assertEqual(boss.delegates[0].name, 'a')
self.assertIsInstance(boss.delegates[1], ProxyObject)
self.assertEqual(boss.delegates[1].name, 'b')
def test_cascading_on_persist(self):
c = self.session.collection(Developer)
boss = c.filter_one({'name': 'boss'})
boss.delegates[0].name = 'assistant'
self.session.persist(boss)
self.session.flush()
data = c.driver.find_one(c.name, {'_id': boss.delegates[0].id})
self.assertEqual(boss.delegates[0].name, data['name'])
def test_update_association(self):
c = self.session.collection(Developer)
boss = c.filter_one({'name': 'boss'})
boss.delegates[0].name = 'assistant'
boss.delegates.append(Developer('c'))
self.session.persist(boss)
self.session.flush()
data = c.driver.find_one(c.name, {'name': 'c'})
self.assertIsNotNone(data)
self.assertEqual(boss.delegates[2].id, data['_id'])
data = c.driver.find_one(c.name, {'name': 'boss'})
self.assertEqual(3, len(data['delegates']))
for delegate in boss.delegates:
self.assertIn(delegate.id, data['delegates'])
def test_cascading_on_delete_with_some_deps(self):
c = self.session.collection(Developer)
boss = c.filter_one({'name': 'boss'})
boss.delegates[0].name = 'assistant'
boss.delegates.append(Developer('c'))
self.session.persist(boss)
architect = Developer('architect', delegates=[boss.delegates[0]])
self.session.persist(architect)
self.session.flush()
self.assertEqual(5, len(c.filter()))
self.session.delete(architect)
self.session.flush()
self.assertEqual(4, len(c.filter()), 'should have some dependencies left (but no orphan node)')
def test_cascading_on_delete_with_no_deps(self):
c = self.session.collection(Developer)
boss = c.filter_one({'name': 'boss'})
boss.delegates[0].name = 'assistant'
boss.delegates.append(Developer('c'))
self.session.persist(boss)
architect = Developer('architect', delegates=[boss.delegates[0]])
self.session.persist(architect)
self.session.flush()
self.session.delete(architect)
self.session.delete(boss)
self.session.flush()
count = len(c.filter())
self.assertEqual(0, count, 'There should not exist dependencies left (orphan removal). (remaining: {})'.format(count))
    def __data_provider(self):
return [
{
'class': Developer,
'fixtures': [
{'_id': 1, 'name': 'a'},
{'_id': 2, 'name': 'b'},
{'_id': 3, 'name': 'boss', 'delegates': [1, 2]}
]
}
]
|
HaebinShin/tensorflow
|
tensorflow/tools/test/run_and_gather_logs.py
|
Python
|
apache-2.0
| 3,287
| 0.006693
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test runner for TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shlex
import sys
import tensorflow as tf
# pylint: disable=g-import-not-at-top
# pylint: disable=g-bad-import-order
# pylint: disable=unused-import
# Note: cpuinfo and psutil are not installed for you in the TensorFlow
# OSS tree. They are installable via pip.
try:
import cpuinfo
import psutil
except ImportError as e:
tf.logging.error("\n\n\nERROR: Unable to import necessary library: {}. "
"Issuing a soft exit.\n\n\n".format(e))
sys.exit(0)
# pylint: enable=g-bad-import-order
# pylint: enable=unused-import
from google.protobuf import text_format
from tensorflow.core.util import test_log_pb2
from tensorflow.tools.test import run_and_gather_logs_lib
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("test_name", "", """Test target to run.""")
tf.app.flags.DEFINE_string(
"test_args", "", """Test arguments, space separated.""")
tf.app.flags.DEFINE_string(
"test_log_output", "", """Filename to write logs.""")
tf.app.flags.DEFINE_bool(
"test_log_output_use_tmpdir", False,
"""Store the log output into tmpdir?.""")
tf.app.flags.DEFINE_string(
"compilation_mode", "", """Mode used during this build (e.g. opt, dbg).""")
tf.app.flags.DEFINE_string(
"cc_flags", "", """CC flags used during this build.""")
def gather_build_configuration():
build_config = test_log_pb2.BuildConfiguration()
build_config.mode = FLAGS.compilation_mode
# Include all flags except includes
cc_flags = [
      flag for flag in shlex.split(FLAGS.cc_flags)
if not flag.startswith("-i")]
build_config.cc_flags.extend(cc_flags)
return build_config
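# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the flag filtering
# above drops include-style flags. A standalone equivalent of that step, with
# made-up flags, looks like this.
def _example_filter_cc_flags():
  flags = shlex.split("-O2 -iquote foo -isystem bar -Wall")
  # Drops '-iquote' and '-isystem' but keeps their arguments:
  # ['-O2', 'foo', 'bar', '-Wall']
  return [flag for flag in flags if not flag.startswith("-i")]
# ---------------------------------------------------------------------------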
def main(unused_args):
test_name = FLAGS.test_name
test_args = FLAGS.test_args
  test_results, _ = run_and_gather_logs_lib.run_and_gather_logs(
test_name, test_args)
# Additional bits we receive from bazel
test_results.build_configuration.CopyFrom(gather_build_configuration())
serialized_test_results = text_format.MessageToString(test_results)
if not FLAGS.test_log_output:
print(serialized_test_results)
return
if FLAGS.test_log_output_use_tmpdir:
tmpdir = tf.test.get_temp_dir()
output_path = os.path.join(tmpdir, FLAGS.test_log_output)
else:
output_path = os.path.abspath(FLAGS.test_log_output)
tf.gfile.GFile(output_path, "w").write(serialized_test_results)
tf.logging.info("Test results written to: %s" % output_path)
if __name__ == "__main__":
tf.app.run()
|
sunqm/pyscf
|
pyscf/cc/ccsd_t.py
|
Python
|
apache-2.0
| 10,987
| 0.012469
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
RHF-CCSD(T) for real integrals
'''
import ctypes
import numpy
from pyscf import lib
from pyscf import symm
from pyscf.lib import logger
from pyscf.cc import _ccsd
# t3 as ijkabc
# JCP 94, 442 (1991); DOI:10.1063/1.460359. Error in Eq (1), should be [ia] >= [jb] >= [kc]
def kernel(mycc, eris, t1=None, t2=None, verbose=logger.NOTE):
cpu1 = cpu0 = (logger.process_clock(), logger.perf_counter())
log = logger.new_logger(mycc, verbose)
if t1 is None: t1 = mycc.t1
if t2 is None: t2 = mycc.t2
nocc, nvir = t1.shape
nmo = nocc + nvir
dtype = numpy.result_type(t1, t2, eris.ovoo.dtype)
if mycc.incore_complete:
ftmp = None
eris_vvop = numpy.zeros((nvir,nvir,nocc,nmo), dtype)
else:
ftmp = lib.H5TmpFile()
eris_vvop = ftmp.create_dataset('vvop', (nvir,nvir,nocc,nmo), dtype)
orbsym = _sort_eri(mycc, eris, nocc, nvir, eris_vvop, log)
mo_energy, t1T, t2T, vooo, fvo, restore_t2_inplace = \
_sort_t2_vooo_(mycc, orbsym, t1, t2, eris)
cpu1 = log.timer_debug1('CCSD(T) sort_eri', *cpu1)
cpu2 = list(cpu1)
orbsym = numpy.hstack((numpy.sort(orbsym[:nocc]),numpy.sort(orbsym[nocc:])))
o_ir_loc = numpy.append(0, numpy.cumsum(numpy.bincount(orbsym[:nocc], minlength=8)))
v_ir_loc = numpy.append(0, numpy.cumsum(numpy.bincount(orbsym[nocc:], minlength=8)))
o_sym = orbsym[:nocc]
oo_sym = (o_sym[:,None] ^ o_sym).ravel()
oo_ir_loc = numpy.append(0, numpy.cumsum(numpy.bincount(oo_sym, minlength=8)))
nirrep = max(oo_sym) + 1
orbsym = orbsym.astype(numpy.int32)
o_ir_loc = o_ir_loc.astype(numpy.int32)
    v_ir_loc = v_ir_loc.astype(numpy.int32)
oo_ir_loc = oo_ir_loc.astype(numpy.int32)
if dtype == numpy.complex128:
drv = _ccsd.libcc.CCsd_t_zcontract
else:
drv = _ccsd.libcc.CCsd_t_contract
et_sum = numpy.zeros(1, dtype=dtype)
def contract(a0, a1, b0, b1, cache):
        cache_row_a, cache_col_a, cache_row_b, cache_col_b = cache
drv(et_sum.ctypes.data_as(ctypes.c_void_p),
mo_energy.ctypes.data_as(ctypes.c_void_p),
t1T.ctypes.data_as(ctypes.c_void_p),
t2T.ctypes.data_as(ctypes.c_void_p),
vooo.ctypes.data_as(ctypes.c_void_p),
fvo.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(nocc), ctypes.c_int(nvir),
ctypes.c_int(a0), ctypes.c_int(a1),
ctypes.c_int(b0), ctypes.c_int(b1),
ctypes.c_int(nirrep),
o_ir_loc.ctypes.data_as(ctypes.c_void_p),
v_ir_loc.ctypes.data_as(ctypes.c_void_p),
oo_ir_loc.ctypes.data_as(ctypes.c_void_p),
orbsym.ctypes.data_as(ctypes.c_void_p),
cache_row_a.ctypes.data_as(ctypes.c_void_p),
cache_col_a.ctypes.data_as(ctypes.c_void_p),
cache_row_b.ctypes.data_as(ctypes.c_void_p),
cache_col_b.ctypes.data_as(ctypes.c_void_p))
cpu2[:] = log.timer_debug1('contract %d:%d,%d:%d'%(a0,a1,b0,b1), *cpu2)
# The rest 20% memory for cache b
mem_now = lib.current_memory()[0]
max_memory = max(0, mycc.max_memory - mem_now)
bufsize = (max_memory*.5e6/8-nocc**3*3*lib.num_threads())/(nocc*nmo) #*.5 for async_io
bufsize *= .5 #*.5 upper triangular part is loaded
bufsize *= .8 #*.8 for [a0:a1]/[b0:b1] partition
bufsize = max(8, bufsize)
log.debug('max_memory %d MB (%d MB in use)', max_memory, mem_now)
with lib.call_in_background(contract, sync=not mycc.async_io) as async_contract:
for a0, a1 in reversed(list(lib.prange_tril(0, nvir, bufsize))):
cache_row_a = numpy.asarray(eris_vvop[a0:a1,:a1], order='C')
if a0 == 0:
cache_col_a = cache_row_a
else:
cache_col_a = numpy.asarray(eris_vvop[:a0,a0:a1], order='C')
async_contract(a0, a1, a0, a1, (cache_row_a,cache_col_a,
cache_row_a,cache_col_a))
for b0, b1 in lib.prange_tril(0, a0, bufsize/8):
cache_row_b = numpy.asarray(eris_vvop[b0:b1,:b1], order='C')
if b0 == 0:
cache_col_b = cache_row_b
else:
cache_col_b = numpy.asarray(eris_vvop[:b0,b0:b1], order='C')
async_contract(a0, a1, b0, b1, (cache_row_a,cache_col_a,
cache_row_b,cache_col_b))
t2 = restore_t2_inplace(t2T)
et_sum *= 2
if abs(et_sum[0].imag) > 1e-4:
logger.warn(mycc, 'Non-zero imaginary part of CCSD(T) energy was found %s',
et_sum[0])
et = et_sum[0].real
log.timer('CCSD(T)', *cpu0)
log.note('CCSD(T) correction = %.15g', et)
return et
def _sort_eri(mycc, eris, nocc, nvir, vvop, log):
cpu1 = (logger.process_clock(), logger.perf_counter())
mol = mycc.mol
nmo = nocc + nvir
if mol.symmetry:
orbsym = symm.addons.label_orb_symm(mol, mol.irrep_id, mol.symm_orb,
eris.mo_coeff, check=False)
orbsym = numpy.asarray(orbsym, dtype=numpy.int32) % 10
else:
orbsym = numpy.zeros(nmo, dtype=numpy.int32)
o_sorted = _irrep_argsort(orbsym[:nocc])
v_sorted = _irrep_argsort(orbsym[nocc:])
vrank = numpy.argsort(v_sorted)
max_memory = max(0, mycc.max_memory - lib.current_memory()[0])
max_memory = min(8000, max_memory*.9)
blksize = min(nvir, max(16, int(max_memory*1e6/8/(nvir*nocc*nmo))))
log.debug1('_sort_eri max_memory %g blksize %d', max_memory, blksize)
dtype = vvop.dtype
with lib.call_in_background(vvop.__setitem__, sync=not mycc.async_io) as save:
bufopv = numpy.empty((nocc,nmo,nvir), dtype=dtype)
buf1 = numpy.empty_like(bufopv)
for j0, j1 in lib.prange(0, nvir, blksize):
ovov = numpy.asarray(eris.ovov[:,j0:j1])
#ovvv = numpy.asarray(eris.ovvv[:,j0:j1])
ovvv = eris.get_ovvv(slice(None), slice(j0,j1))
for j in range(j0,j1):
oov = ovov[o_sorted,j-j0]
ovv = ovvv[o_sorted,j-j0]
#if ovv.ndim == 2:
# ovv = lib.unpack_tril(ovv, out=buf)
bufopv[:,:nocc,:] = oov[:,o_sorted][:,:,v_sorted].conj()
bufopv[:,nocc:,:] = ovv[:,v_sorted][:,:,v_sorted].conj()
save(vrank[j], bufopv.transpose(2,0,1))
bufopv, buf1 = buf1, bufopv
cpu1 = log.timer_debug1('transpose %d:%d'%(j0,j1), *cpu1)
return orbsym
def _sort_t2_vooo_(mycc, orbsym, t1, t2, eris):
assert(t2.flags.c_contiguous)
vooo = numpy.asarray(eris.ovoo).transpose(1,0,3,2).conj().copy()
nocc, nvir = t1.shape
if mycc.mol.symmetry:
orbsym = numpy.asarray(orbsym, dtype=numpy.int32)
o_sorted = _irrep_argsort(orbsym[:nocc])
v_sorted = _irrep_argsort(orbsym[nocc:])
mo_energy = eris.mo_energy
mo_energy = numpy.hstack((mo_energy[:nocc][o_sorted],
mo_energy[nocc:][v_sorted]))
t1T = numpy.asarray(t1.T[v_sorted][:,o_sorted], order='C')
fvo = eris.fock[nocc:,:nocc]
fvo = numpy.asarray(fvo[v_sorted][:,o_sorted], order='C')
o_sym = orbsym[o_sorted]
oo_sym = (o_sym[:,None] ^ o_sym).ravel()
oo_sorted = _irrep_argsort(oo_sym)
#:vooo = eris.ovoo.transpose(1,0,2,3)
#:vooo = vooo[v_sorted][:,o_sorted][:,:,o_sorted][:,:,:,o_sorted]
|
alex-ip/agdc
|
api/source/main/python/datacube/api/__init__.py
|
Python
|
bsd-3-clause
| 6,195
| 0.002744
|
# ===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===============================================================================
from datacube.api.query import Month
__author__ = "Simon Oldfield"
import argparse
import logging
import os
from datacube.api.model import Satellite, DatasetType
from datacube.api.utils import PqaMask, WofsMask, OutputFormat
_log = logging.getLogger()
def satellite_arg(s):
if s in [sat.name for sat in Satellite]:
return Satellite[s]
raise argparse.ArgumentTypeError("{0} is not a supported satellite".format(s))
def month_arg(s):
if s in [month.name for month in Month]:
return Month[s]
raise argparse.ArgumentTypeError("{0} is not a supported month".format(s))
def pqa_mask_arg(s):
if s in [m.name for m in PqaMask]:
return PqaMask[s]
raise argparse.ArgumentTypeError("{0} is not a supported PQA mask".format(s))
def wofs_mask_arg(s):
if s in [m.name for m in WofsMask]:
return WofsMask[s]
raise argparse.ArgumentTypeError("{0} is not a supported WOFS mask".format(s))
def dataset_type_arg(s):
if s in [t.name for t in DatasetType]:
return DatasetType[s]
raise argparse.ArgumentTypeError("{0} is not a supported dataset type".format(s))
def writeable_dir(prospective_dir):
if not os.path.exists(prospective_dir):
raise argparse.ArgumentTypeError("{0} doesn't exist".format(prospective_dir))
if not os.path.isdir(prospective_dir):
raise argparse.ArgumentTypeError("{0} is not a directory".format(prospective_dir))
if not os.access(prospective_dir, os.W_OK):
raise argparse.ArgumentTypeError("{0} is not writeable".format(prospective_dir))
return prospective_dir
def readable_dir(prospective_dir):
if not os.path.exists(prospective_dir):
raise argparse.ArgumentTypeError("{0} doesn't exist".format(prospective_dir))
if not os.path.isdir(prospective_dir):
raise argparse.ArgumentTypeError("{0} is not a directory".format(prospective_dir))
if not os.access(prospective_dir, os.R_OK):
raise argparse.ArgumentTypeError("{0} is not readable".format(prospective_dir))
return prospective_dir
def readable_file(prospective_file):
if not os.path.exists(prospective_file):
raise argparse.ArgumentTypeError("{0} doesn't exist".format(prospective_file))
if not os.path.isfile(prospective_file):
raise argparse.ArgumentTypeError("{0} is not a file".format(prospective_file))
if not os.access(prospective_file, os.R_OK):
raise argparse.ArgumentTypeError("{0} is not readable".format(prospective_file))
return prospective_file
def date_arg(s):
try:
return parse_date(s)
except ValueError:
raise argparse.ArgumentTypeError("{0} is not a valid date".format(s))
def date_min_arg(s):
try:
return parse_date_min(s)
except ValueError:
raise argparse.ArgumentTypeError("{0} is not a valid date".format(s))
def date_max_arg(s):
try:
return parse_date_max(s)
except ValueError:
raise argparse.ArgumentTypeError("{0} is not a valid date".format(s))
def dummy(path):
_log.debug("Creating dummy output %s" % path)
import os
if not os.path.exists(path):
with open(path, "w") as f:
pass
def parse_date(s):
from datetime import datetime
return datetime.strptime(s, "%Y-%m-%d").date()
def parse_date_min(s):
from datetime import datetime
if s:
if len(s) == len("YYYY"):
return datetime.strptime(s, "%Y").date()
elif len(s) == len("YYYY-MM"):
return datetime.strptime(s, "%Y-%m").date()
elif len(s) == len("YYYY-MM-DD"):
return datetime.strptime(s, "%Y-%m-%d").date()
return None
def parse_date_max(s):
from datetime import datetime
import calendar
if s:
if len(s) == len("YYYY"):
d = datetime.strptime(s, "%Y").date()
d = d.replace(month=12, day=31)
return d
elif len(s) == len("YYYY-MM"):
d = datetime.strptime(s, "%Y-%m").date()
first, last = calendar.monthrange(d.year, d.month)
d = d.replace(day=last)
return d
elif len(s) == len("YYYY-MM-DD"):
d = datetime.strptime(s, "%Y-%m-%d").date()
return d
return None
def output_format_arg(s):
if s in [f.name for f in OutputFormat]:
return OutputFormat[s]
raise argparse.ArgumentTypeError("{0} is not a supported output format".format(s))
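# A minimal, hedged sketch (not part of the original module) of how the partial-date
# helpers above expand a bare year or year-month into range bounds; the sample values
# are assumptions chosen only for illustration.
if __name__ == '__main__':  # demonstration only
    assert parse_date_min("2014") == parse_date("2014-01-01")
    assert parse_date_min("2014-02") == parse_date("2014-02-01")
    assert parse_date_max("2014") == parse_date("2014-12-31")
    assert parse_date_max("2014-02") == parse_date("2014-02-28")  # last day via calendar.monthrange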
|
fsuarez6/rate_position_controller
|
scripts/bubble_technique.py
|
Python
|
bsd-3-clause
| 12,314
| 0.013805
|
#! /usr/bin/env python
"""
Notes
-----
Calculations are carried out with numpy.float64 precision.
This Python implementation is not optimized for speed.
Angles are in radians unless specified otherwise.
Quaternions ix+jy+kz+w are represented as [x, y, z, w].
"""
import rospy
# Messages
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from baxter_core_msgs.msg import EndpointState
from omni_msgs.msg import OmniState, OmniFeedback, OmniButtonEvent
from geometry_msgs.msg import Vector3, Quaternion, Transform, PoseStamped, Point, Wrench
from visualization_msgs.msg import Marker
# State Machine
import smach
import smach_ros
from smach import CBState
# Math
from math import pi, exp, sin, sqrt
import numpy as np
import tf.transformations as tr
class TextColors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def disable(self):
self.HEADER = ''
self.OKBLUE = ''
self.OKGREEN = ''
self.WARNING = ''
self.FAIL = ''
self.ENDC = ''
class RatePositionController:
STATES = ['GO_TO_CENTER', 'POSITION_CONTROL', 'VIBRATORY_PHASE', 'RATE_CONTROL', 'RATE_COLLISION']
def __init__(self):
# Create a SMACH state machine
self.sm = smach.StateMachine(outcomes=['succeeded', 'aborted'])
with self.sm:
# Add states to the state machine
smach.StateMachine.add('GO_TO_CENTER', CBState(self.go_to_center, cb_args=[self]),
transitions={'lock':'GO_TO_CENTER', 'succeeded':'POSITION_CONTROL', 'aborted': 'aborted'})
smach.StateMachine.add('POSITION_CONTROL', CBState(self.position_control, cb_args=[self]),
transitions={'stay':'POSITION_CONTROL', 'leave':'RATE_CONTROL', 'aborted': 'aborted'})
smach.StateMachine.add('RATE_CONTROL', CBState(self.rate_control, cb_args=[self]),
transitions={'stay':'RATE_CONTROL', 'leave':'GO_TO_CENTER', 'aborted': 'aborted'})
# Read all the parameters from the parameter server
# Topics to interact
master_name = self.read_parameter('~master_name', 'phantom')
slave_name = self.read_parameter('~slave_name', 'grips')
self.master_state_topic = '/%s/state' % master_name
self.feedback_topic = '/%s/force_feedback' % master_name
self.slave_state_topic = '/%s/state' % slave_name
self.ik_mc_topic = '/%s/ik_command' % slave_name
# Workspace definition
self.units = self.read_parameter('~units', 'mm')
width = self.read_parameter('~workspace/width', 140.0)
height = self.read_parameter('~workspace/height', 100.0)
depth = self.read_parameter('~workspace/depth', 55.0)
self.center_pos = self.read_parameter('~workspace/center', [0, 0 ,0])
self.workspace = np.array([width, depth, height])
self.hysteresis = self.read_parameter('~hysteresis', 3.0)
# Force feedback parameters
self.locked = False
self.k_center = self.read_parameter('~k_center', 0.1)
self.b_center = self.read_parameter('~b_center', 0.003)
self.k_rate = self.read_parameter('~k_rate', 0.05)
self.b_rate = self.read_parameter('~b_rate', 0.003)
# Position parameters
self.publish_frequency = self.read_parameter('~publish_rate', 1000.0)
self.position_ratio = self.read_parameter('~position_ratio', 250)
self.position_axes = [0, 1, 2]
self.position_sign = np.array([1.0, 1.0, 1.0])
self.axes_mapping = self.read_parameter('~axes_mapping', ['x', 'y' ,'z'])
if len(self.axes_mapping) != 3:
            rospy.logwarn('Invalid number of values in [axes_mapping]. Expected 3, received %d' % len(self.axes_mapping))
for i, axis in enumerate(self.axes_mapping):
axis = axis.lower()
if '-' == axis[0]:
axis = axis[1:]
self.position_sign[i] = -1.0
if axis not in ('x','y','z'):
rospy.logwarn('Invalid axis %s given in [axes_mapping]' % axis)
self.position_axes[i] = ['x','y','z'].index(axis)
self.workspace = self.change_axes(self.workspace)
# Rate parameters
self.rate_pivot = np.zeros(3)
self.rate_gain = self.read_parameter('~rate_gain', 1.0)
# Initial values
self.frame_id = self.read_parameter('~frame_id', 'world')
self.colors = TextColors()
self.master_pos = None
self.master_rot = np.array([0, 0, 0, 1])
self.master_vel = np.zeros(3)
self.master_dir = np.zeros(3)
self.slave_pos = None
self.slave_rot = np.array([0, 0, 0, 1])
self.timer = None
self.force_feedback = np.zeros(3)
self.pos_force_feedback = np.zeros(3)
# Synch
self.slave_synch_pos = np.zeros(3)
self.slave_synch_rot = np.array([0, 0, 0, 1])
# Setup Subscribers/Publishers
self.feedback_pub = rospy.Publisher(self.feedback_topic, OmniFeedback)
self.ik_mc_pub = rospy.Publisher(self.ik_mc_topic, PoseStamped)
self.vis_pub = rospy.Publisher('visualization_marker', Marker)
rospy.Subscriber(self.master_state_topic, OmniState, self.cb_master_state)
rospy.Subscriber(self.slave_state_topic, EndpointState, self.cb_slave_state)
rospy.Subscriber('/takktile/force_feedback', Wrench, self.feedback_cb)
self.loginfo('Waiting for [%s] and [%s] topics' % (self.master_state_topic, self.slave_state_topic))
while not rospy.is_shutdown():
            if (self.slave_pos is None) or (self.master_pos is None):
rospy.sleep(0.01)
else:
self.loginfo('Rate position controller running')
# Register rospy shutdown hook
rospy.on_shutdown(self.shutdown_hook)
break
# Make sure the first command sent to the slave is equal to its current position6D
self.command_pos = np.array(self.slave_pos)
self.command_rot = np.array(self.slave_rot)
# Start the timer that will publish the ik commands
self.command_timer = rospy.Timer(rospy.Duration(1.0/self.publish_frequency), self.publish_command)
self.draw_timer = rospy.Timer(rospy.Duration(1.0/10.0), self.draw_position_region)
self.loginfo('State machine state: GO_TO_CENTER')
@smach.cb_interface(outcomes=['lock', 'succeeded', 'aborted'])
def go_to_center(user_data, self):
if not np.allclose(np.zeros(3), self.master_pos, atol=self.hysteresis) or self.locked:
self.force_feedback = (self.k_center * self.master_pos + self.b_center * self.master_vel) * -1.0
return 'lock'
else:
self.force_feedback = np.zeros(3)
self.slave_synch_pos = np.array(self.slave_pos)
self.command_pos = np.array(self.slave_pos)
self.loginfo('State machine transitioning: GO_TO_CENTER:succeeded-->POSITION_CONTROL')
return 'succeeded'
@smach.cb_interface(outcomes=['stay', 'leave', 'aborted'])
def position_control(user_data, self):
if self.inside_workspace(self.master_pos) and not self.locked:
self.command_pos = self.slave_synch_pos + self.master_pos / self.position_ratio
self.force_feedback = self.pos_force_feedback
return 'stay'
else:
self.force_feedback = np.zeros(3)
self.command_pos = np.array(self.slave_pos)
self.rate_pivot = self.master_pos
self.loginfo('State machine transitioning: POSITION_CONTROL:leave-->RATE_CONTROL')
return 'leave'
@smach.cb_interface(outcomes=['stay', 'leave', 'aborted'])
def rate_control(user_data, self):
if not (self.inside_workspace(self.master_pos) or self.locked):
penetration = sqrt(np.sum((self.master_pos - self.rate_pivot) ** 2)) * self.normalize_vector(self.master_pos)
# Send the force feedback to the master
self.force_feedback = (self.k_rate * penetration + self.b_rate * self.master_vel) * -1.0
# Send the rate command to the slave
            self.command_pos += (self.rate_gain * penetration) / self.position_ratio
# Move the workspace
self.slave_synch_pos = self.slave_pos - self.master_pos / self.position_ratio
return 'stay'
else:
self.command_pos = np.array(self.slave_pos)
self.force_feedback = np.zeros(3)
self.loginfo('State machine transitioning: RATE_CONTROL:leave-->POSITION_CONTROL')
return 'leave'
    @smach.cb_interface(outcomes=[
|
kobotoolbox/kpi
|
kpi/views/v2/user_asset_subscription.py
|
Python
|
agpl-3.0
| 863
| 0
|
# coding: utf-8
from rest_framework import viewsets
from kpi.models import UserAssetSubscription
from kpi.serializers.v2.user_asset_subscription import (
UserAssetSubscriptionSerializer,
)
from kpi.utils.object_permission import get_database_user
class UserAssetSubscriptionViewSet(viewsets.ModelViewSet):
    queryset = UserAssetSubscription.objects.none()
serializer_class = UserAssetSubscriptionSerializer
lookup_field = 'uid'
def get_queryset(self):
user = get_database_user(self.request.user)
        criteria = {'user': user}
if 'asset__uid' in self.request.query_params:
criteria['asset__uid'] = self.request.query_params[
'asset__uid']
return UserAssetSubscription.objects.filter(**criteria)
def perform_create(self, serializer):
serializer.save(user=self.request.user)
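    # A hedged sketch, not part of the original file: with the filtering above, a request
    # such as GET <list-endpoint>?asset__uid=aXYZ123 (URL shape and uid assumed) is answered
    # only with the requesting user's own subscriptions, roughly equivalent to
    #   UserAssetSubscription.objects.filter(user=request_user, asset__uid='aXYZ123')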
|
artourkin/Cinnamon
|
files/usr/share/cinnamon/desklets/launcher@cinnamon.org/editorDialog.py
|
Python
|
gpl-2.0
| 8,165
| 0.004776
|
#!/usr/bin/env python2
#-*-indent-tabs-mode: nil-*-
import sys
import os.path
import gi
from gi.repository import Gtk, Gio
SCHEMAS = "org.cinnamon.desklets.launcher"
LAUNCHER_KEY = "launcher-list"
HOME_DIR = os.path.expanduser("~")+"/"
CUSTOM_LAUNCHERS_PATH = HOME_DIR + ".cinnamon/panel-launchers/"
EDITOR_DIALOG_UI_PATH = "/usr/share/cinnamon/desklets/launcher@cinnamon.org/editorDialog.ui"
class EditorDialog:
def __init__(self, desklet_id=-1):
self.launcher_settings = Gio.Settings.new(SCHEMAS)
self.launcher_type = "Application"
self.name = ""
self.desklet_id = desklet_id
if not desklet_id == -1:
launcher_list = self.launcher_settings.get_strv(LAUNCHER_KEY)
launcher = ""
for item in launcher_list:
if item.split(":")[0] == str(self.desklet_id):
launcher = item.split(":")[1][:-8]
break;
self.name = launcher
if self.name[:24] == "cinnamon-custom-launcher":
self.launcher_type = "Custom Application"
self.tree = Gtk.Builder()
self.tree.add_from_file(EDITOR_DIALOG_UI_PATH)
self.dialog = self.tree.get_object("dialog")
self.launcher_type_combo_box = self.tree.get_object("launcher_type_combo_box")
self.name_entry = self.tree.get_object("name_entry")
self.title_entry = self.tree.get_object("title_entry")
self.command_entry = self.tree.get_object("command_entry")
self.icon_name_entry = self.tree.get_object("icon_name_entry")
self.launcher_icon = self.tree.get_object("launcher_icon")
self.name_entry.set_text(self.name)
self.model = self.launcher_type_combo_box.get_model()
self.citer = [self.model.get_iter_from_string("0"),self.model.get_iter_from_string("1")]
self.launcher_type_combo_box.set_active_iter(self.citer[self.launcher_type_to_index(self.launcher_type)])
self.update_sensitivity()
self.set_fields_by_name()
self.on_icon_changed(self.icon_name_entry.get_text())
self.tree.connect_signals(self)
self.dialog.show_all()
self.dialog.connect("destroy", Gtk.main_quit)
self.dialog.connect("key_release_event", self.on_key_release_event)
Gtk.main()
def launcher_type_to_index(self,launcher_type):
if launcher_type == "Application":
return 0
elif launcher_type == "Custom Application":
return 1
def update_sensitivity(self):
sensitive = True
if (self.launcher_type == "Application"):
sensitive = False
self.name_entry.set_sensitive(not sensitive)
self.title_entry.set_sensitive(sensitive)
self.command_entry.set_sensitive(sensitive)
self.icon_name_entry.set_sensitive(sensitive)
if (self.launcher_type == "Application"):
self.name_entry.grab_focus()
else:
self.title_entry.grab_focus()
def on_launcher_type_combo_box_changed(self, widget):
self.launcher_type = self.launcher_type_combo_box.get_active_text()
self.update_sensitivity()
self.on_name_changed(self.name_entry)
def on_icon_changed(self, widget):
self.launcher_icon.set_from_icon_name(self.icon_name_entry.get_text(), 48)
def on_name_changed(self, widget):
if (self.launcher_type == "Application"):
self.set_fields_by_name()
def set_fields_by_name(self):
application = Application(self.name_entry.get_text() + ".desktop")
if application.title:
self.title_entry.set_text(application.title)
self.command_entry.set_text(application.command)
self.icon_name_entry.set_text(application.icon_name)
def on_key_release_event(self, widget, event):
if event.keyval == 65293: # Enter button
self.on_edit_ok_clicked(widget)
def on_edit_close_clicked(self, widget):
self.dialog.destroy()
def on_edit_ok_clicked(self, widget):
if not self.name_entry.get_text():
return None
if (self.launcher_type == "Application"):
launcher_name = self.name_entry.get_text() + ".desktop"
elif (self.launcher_type == "Custom Application"):
launcher_name = self.write_custom_application()
enabled_desklets = None
if self.desklet_id == -1: # Add new launcher
settings = Gio.Settings.new("org.cinnamon")
self.desklet_id = settings.get_int("next-desklet-id")
settings.set_int("next-desklet-id", self.desklet_id + 1)
enabled_desklets = settings.get_strv("enabled-desklets")
enabled_desklets.append("launcher@cinnamon.org:%s:0:100" % self.desklet_id)
launcher_list = self.launcher_settings.get_strv(LAUNCHER_KEY)
        # If the application is already present in the list, remove it first
for item in launcher_list:
if item.split(":")[0] == str(self.desklet_id):
launcher_list.remove(item)
launcher_list.append(str(self.desklet_id) + ":" + launcher_name)
self.launcher_settings.set_strv(LAUNCHER_KEY, launcher_list)
# Update desklets list now if new desklet is made
if enabled_desklets:
settings.set_strv("enabled-desklets", enabled_desklets)
self.dialog.destroy()
def get_custom_id(self):
i = 1
directory = Gio.file_new_for_path(CUSTOM_LAUNCHERS_PATH)
if not directory.query_exists(None):
directory.make_directory_with_parents(None)
fileRec = Gio.file_parse_name(CUSTOM_LAUNCHERS_PATH + 'cinnamon-custom-launcher-' + str(i) + '.desktop')
while fileRec.query_exists(None):
i = i + 1
fileRec = Gio.file_parse_name(CUSTOM_LAUNCHERS_PATH + 'cinnamon-custom-launcher-' + str(i) + '.desktop')
return i;
def write_custom_application(self):
i = self.get_custom_id();
file_name = "cinnamon-custom-launcher-" + str(i) + ".desktop"
file_path = CUSTOM_LAUNCHERS_PATH + file_name
title = self.title_entry.get_text()
command = self.command_entry.get_text()
icon_name = self.icon_name_entry.get_text()
_file = open(file_path,"w+")
        write_list=["[Desktop Entry]\n","Type=Application\n", "Name=" + title + "\n","Exec=" + command + "\n","Icon=" + icon_name + "\n"]
_file.writelines(write_list)
_file.close()
return file_name
class Application:
def __init__(self, file_name):
self.file_name = file_name
self._path = None
self.icon_name = None
self.title = None
self.command = None
if (os.path.exists(CUSTOM_LAUNCHERS_PATH + file_name)):
self._path = CUSTOM_LAUNCHERS_PATH + file_name
elif (os.path.exists("/usr/share/applications/" + file_name)):
self._path = "/usr/share/applications/" + file_name
if self._path:
self._file = open(self._path, "r")
while self._file:
line = self._file.readline()
if len(line)==0:
break
if (line.find("Name") == 0 and (not "[" in line)):
self.title = line.replace("Name","").replace("=","").replace("\n","")
if (line.find("Icon") == 0):
self.icon_name = line.replace("Icon","").replace(" ","").replace("=","").replace("\n","")
if (line.find("Exec") == 0):
self.command = line.replace("Exec","").replace("=","").replace("\n","")
if self.icon_name and self.title and self.command:
break
if not self.icon_name:
self.icon_name = "application-x-executable"
if not self.title:
self.title = "Application"
if not self.command:
self.command = ""
self._file.close()
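# A hedged sketch, not part of the original file: a minimal .desktop entry such as
#   [Desktop Entry]
#   Type=Application
#   Name=Files
#   Exec=nemo
#   Icon=system-file-manager
# parsed as Application("files.desktop") (file name and values made up) would yield
# title "Files", command "nemo" and icon_name "system-file-manager".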
if __name__ == "__main__":
if len(sys.argv) > 1:
dialog = EditorDialog(sys.argv[1])
else:
dialog = EditorDialog()
|
oliver-sanders/cylc
|
tests/unit/tui/test_data.py
|
Python
|
gpl-3.0
| 1,331
| 0
|
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cylc.flow.tui.data
from cylc.flow.tui.data import generate_mutation
def test_generate_mutation(monkeypatch):
"""It should produce
|
a GraphQL mutation with the args filled in."""
arg_types = {
'foo': 'String!',
'bar': '[Int]'
}
monkeypatch.setattr(cylc.flow.tui.data, 'ARGUMENT_TYPES', arg_types)
assert generate_mutation(
'my_mutation',
['foo', 'bar']
) == '''
mutation($foo: String!, $bar: [Int]) {
my_mutation (foos: $foo, bars: $bar) {
result
}
}
'''
|
napsternxg/gensim
|
gensim/models/poincare.py
|
Python
|
gpl-3.0
| 75,098
| 0.003356
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Jayant Jain <jayantjain1992@gmail.com>
# Copyright (C) 2017 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Python implementation of Poincaré Embeddings.
These embeddings are better at capturing latent hierarchical information than traditional Euclidean embeddings.
The method is described in detail in `Maximilian Nickel, Douwe Kiela -
"Poincaré Embeddings for Learning Hierarchical Representations" <https://arxiv.org/abs/1705.08039>`_.
The main use-case is to automatically learn hierarchical representations of nodes from a tree-like structure,
such as a Directed Acyclic Graph (DAG), using a transitive closure of the relations. Representations of nodes in a
symmetric graph can also be learned.
This module allows training Poincaré Embeddings from a training file containing relations of graph in a
csv-like format, or from a Python iterable of relations.
Examples
--------
Initialize and train a model from a list
.. sourcecode:: pycon
>>> from gensim.models.poincare import PoincareModel
>>> relations = [('kangaroo', 'marsupial'), ('kangaroo', 'mammal'), ('gib', 'cat')]
>>> model = PoincareModel(relations, negative=2)
>>> model.train(epochs=50)
Initialize and train a model from a file containing one relation per line
.. sourcecode:: pycon
>>> from gensim.models.poincare import PoincareModel, PoincareRelations
>>> from gensim.test.utils import datapath
>>> file_path = datapath('poincare_hypernyms.tsv')
>>> model = PoincareModel(PoincareRelations(file_path), negative=2)
>>> model.train(epochs=50)
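What the trained embeddings can then be used for, for example nearest neighbours and
distances (a hedged sketch; `most_similar` and `distance` are methods of the keyed
vectors in `model.kv`, assumed from the keyed-vectors API rather than shown in this file)
.. sourcecode:: pycon
    >>> model.kv.most_similar('kangaroo', topn=2)
    >>> model.kv.distance('kangaroo', 'mammal')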
"""
import csv
import logging
from numbers import Integral
import sys
import time
import numpy as np
from collections import defaultdict, Counter
from numpy import random as np_random
from scipy.stats import spearmanr
from six import string_types
from six.moves import zip, range
from gensim import utils, matutils
from gensim.models.keyedvectors import Vocab, BaseKeyedVectors
from gensim.models.utils_any2vec import _save_word2vec_format, _load_word2vec_format
from numpy import float32 as REAL
try:
from autograd import grad # Only required for optionally verifying gradients while training
from autograd import numpy as grad_np
AUTOGRAD_PRESENT = True
except ImportError:
AUTOGRAD_PRESENT = False
logger = logging.getLogger(__name__)
class PoincareModel(utils.SaveLoad):
"""Train, use and evaluate Poincare Embeddings.
The model can be stored/loaded via its :meth:`~gensim.models.poincare.PoincareModel.save`
and :meth:`~gensim.models.poincare.PoincareModel.load` methods, or stored/loaded in the word2vec format
via `model.kv.save_word2vec_format` and :meth:`~gensim.models.poincare.PoincareKeyedVectors.load_word2vec_format`.
Notes
-----
Training cannot be resumed from a model loaded via `load_word2vec_format`, if you wish to train further,
use :meth:`~gensim.models.poincare.PoincareModel.save` and :meth:`~gensim.models.poincare.PoincareModel.load`
methods instead.
An important attribute (that provides a lot of additional functionality when directly accessed) are the
keyed vectors:
self.kv : :class:`~gensim.models.poincare.PoincareKeyedVectors`
This object essentially contains the mapping between nodes and embeddings, as well the vocabulary of the model
(set of unique nodes seen by the model). After training, it can be used to perform operations on the vectors
such as vector lookup, distance and similarity calculations etc.
See the documentation of its class for usage examples.
"""
def __init__(self, train_data, size=50, alpha=0.1, negative=10, workers=1, epsilon=1e-5, regularization_coeff=1.0,
burn_in=10, burn_in_alpha=0.01, init_range=(-0.001, 0.001), dtype=np.float64, seed=0):
"""Initialize and train a Poincare embedding model from an iterable of relations.
Parameters
----------
train_data : {iterable of (str, str), :class:`gensim.models.poincare.PoincareRelations`}
Iterable of relations, e.g. a list of tuples, or a :class:`gensim.models.poincare.PoincareRelations`
instance streaming from a file. Note that the relations are treated as ordered pairs,
i.e. a relation (a, b) does not imply the opposite relation (b, a). In case the relations are symmetric,
the data should contain both relations (a, b) and (b, a).
size : int, optional
Number of dimensions of the trained model.
alpha : float, optional
Learning rate for training.
negative : int, optional
Number of negative samples to use.
workers : int, optional
Number of threads to use for training the model.
epsilon : float, optional
Constant used for clipping embeddings below a norm of one.
regularization_coeff : float, optional
Coefficient used for l2-regularization while training (0 effectively disables regularization).
burn_in : int, optional
Number of epochs to use for burn-in initialization (0 means no burn-in).
burn_in_alpha : float, optional
Learning rate for burn-in initialization, ignored if `burn_in` is 0.
init_range : 2-tuple (float, float)
Range within which the vectors are randomly initialized.
dtype : numpy.dtype
The numpy dtype to use for the vectors in the model (numpy.float64, numpy.float32 etc).
Using lower precision floats may be useful in increasing training speed and reducing memory usage.
seed : int, optional
Seed for random to ensure reproducibility.
Examples
--------
        Initialize a model from a list:
.. sourcecode:: pycon
>>> from gensim.models.poincare import PoincareModel
>>> relations = [('kangaroo', 'marsupial'), ('kangaroo', 'mammal'), ('gib', 'cat')]
>>> model = PoincareModel(relations, negative=2)
Initialize a model from a file containing one relation per line:
.. sourcecode:: pycon
>>> from gensim.models.poincare import PoincareModel, PoincareRelations
>>> from gensim.test.utils import datapath
>>> file_path = datapath('poincare_hypernyms.tsv')
>>> model = PoincareModel(PoincareRelations(file_path), negative=2)
See :class:`~gensim.models.poincare.PoincareRelations` for more options.
"""
self.train_data = train_data
self.kv = PoincareKeyedVectors(size)
self.all_relations = []
self.node_relations = defaultdict(set)
self._negatives_buffer = NegativesBuffer([])
self._negatives_buffer_size = 2000
self.size = size
self.train_alpha = alpha # Learning rate for training
self.burn_in_alpha = burn_in_alpha # Learning rate for burn-in
self.alpha = alpha # Current learning rate
self.negative = negative
self.workers = workers
self.epsilon = epsilon
self.regularization_coeff = regularization_coeff
self.burn_in = burn_in
self._burn_in_done = False
self.dtype = dtype
self.seed = seed
self._np_random = np_random.RandomState(seed)
self.init_range = init_range
self._loss_grad = None
self.build_vocab(train_data)
def build_vocab(self, relations, update=False):
"""Build the model's vocabulary from known relations.
Parameters
----------
relations : {iterable of (str, str), :class:`gensim.models.poincare.PoincareRelations`}
Iterable of relations, e.g. a list of tuples, or a :class:`gensim.models.poincare.PoincareRelations`
instance streaming from a file. Note that the relations are treated as ordered pairs,
i.e. a relation (a, b) does not imply the opposite relation (b, a). In case the relations are symmetric,
the data should contain both relations (a, b) and (b, a).
upda
|
tungvx/deploy
|
Django-0.90/django/middleware/common.py
|
Python
|
apache-2.0
| 3,684
| 0.004343
|
from django.conf import settings
from django.utils import httpwrappers
from django.core.mail import mail_managers
import md5, os
class CommonMiddleware:
"""
"Common" middleware for taking care of some basic operations:
- Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS
- URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
this middleware appends missing slashes and/or prepends missing "www."s.
- ETags: If the USE_ETAGS setting is set, ETags will be calculated from
the entire page content and Not Modified responses will be returned
appropriately.
"""
def process_request(self, request):
"""
Check for denied User-Agents and rewrite the URL based on
settings.APPEND_SLASH and settings.PREPEND_WWW
"""
        # Check for denied User-Agents
if request.META.has_key('HTTP_USER_AGENT'):
for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
return httpwrappers.HttpResponseForbidden('<h1>Forbidden</h1>')
# Check for a redirect based on settings.APPEND_SLASH and settings.PREPEND_WWW
old_url = [request.META['HTTP_HOST'], request.path]
new_url = old_url[:]
if settings.PREPEND_WWW and not old_url[0].startswith('www.'):
new_url[0] = 'www.' + old_url[0]
# Append a slash if append_slash is set and the URL doesn't have a
# trailing slash or a file extension.
if settings.APPEND_SLASH and (old_url[1][-1] != '/') and ('.' not in old_url[1].split('/')[-1]):
new_url[1] = new_url[1] + '/'
if new_url != old_url:
# Redirect
newurl = "%s://%s%s" % (os.environ.get('HTTPS') == 'on' and 'https' or 'http', new_url[0], new_url[1])
if request.GET:
newurl += '?' + request.GET.urlencode()
return httpwrappers.HttpResponseRedirect(newurl)
return None
def process_response(self, request, response):
"Check for a flat page (for 404s) and calculate the Etag, if needed."
if response.status_code == 404:
if settings.SEND_BROKEN_LINK_EMAILS:
# If the referrer was from an internal link or a non-search-engine site,
# send a note to the managers.
domain = request.META['HTTP_HOST']
referer = request.META.get('HTTP_REFERER', None)
is_internal = referer and (domain in referer)
path = request.get_full_path()
if referer and not _is_ignorable_404(path) and (is_internal or '?' not in referer):
mail_managers("Broken %slink on %s" % ((is_internal and 'INTERNAL ' or ''), domain),
"Referrer: %s\nRequested URL: %s\n" % (referer, request.get_full_path()))
return response
# Use ETags, if requested.
if settings.USE_ETAGS:
etag = md5.new(response.get_content_as_string(settings.DEFAULT_CHARSET)).hexdigest()
if request.META.get('HTTP_IF_NONE_MATCH') == etag:
response = httpwrappers.HttpResponseNotModified()
else:
response['ETag'] = etag
return response
def _is_ignorable_404(uri):
"Returns True if a 404 at the given URL *shouldn't* notify the site managers"
for start in settings.IGNORABLE_404_STARTS:
if uri.startswith(start):
return True
for end in settings.IGNORABLE_404_ENDS:
if uri.endswith(end):
return True
return False
|
tschaefer/remote
|
remote/tv.py
|
Python
|
bsd-3-clause
| 1,157
| 0.001729
|
# -*- coding: utf-8 -*-
import requests
import urlparse
class TV(object):
def __init__(self, url):
self.url = url
self.chan = None
self.stream = None
def _post(self, endpoint, json):
url = urlparse.urljoin(self.url, endpoint)
try:
requests.post(url, json=json)
except:
pass
def _json(self, action, data='', options='live'):
return {
'action': action,
'data': data,
'options': options
}
def start(self, chan, stream, modus):
self.chan = chan
self.stream = stream
json = self._json('start', data=stream, options=modus)
self._post('playback', json)
def stop(self):
        self.chan = None
self.stream = None
json = self._json('stop')
self._post('playback', json)
def play(self):
json = self._json('play')
        self._post('playback', json)
def pause(self):
json = self._json('pause')
self._post('playback', json)
def vol(self, action):
json = self._json(action)
self._post('volume', json)
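# A hedged usage sketch, not part of the original module: the endpoint URL, channel,
# stream address and action strings below are assumptions for illustration only.
if __name__ == '__main__':
    tv = TV('http://localhost:8080/')
    tv.start('ard', 'http://example.org/stream.m3u8', 'live')
    tv.vol('up')
    tv.pause()
    tv.play()
    tv.stop()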
|
alexandrul-ci/robotframework
|
utest/utils/test_etreesource.py
|
Python
|
apache-2.0
| 1,816
| 0
|
import os
import unittest
from robot.utils.asserts import assert_equal, assert_true
from robot.utils.etreewrapper import ETSource, ET
from robot.utils import IRONPYTHON, PY3
PATH = os.path.join(os.path.dirname(__file__), 'test_etreesource.py')
if PY3:
unicode = str
class TestETSource(unittest.TestCase):
def test_path_to_file(self):
source = ETSource(PATH)
with source as src:
            assert_equal(src, PATH)
self._verify_string_representation(source, PATH)
assert_true(source._opened is None)
def test_opened_file_object(self):
source = ETSource(open(PATH))
with source as src:
assert_true(src.read().startswith('import os'))
assert_true(src.closed is False)
self._verify_string_representation(source, PATH)
assert_true(source._opened is None)
def test_byte_string(self):
        self._test_string('\n<tag>content</tag>\n')
def test_unicode_string(self):
self._test_string(u'\n<tag>hyv\xe4</tag>\n')
def _test_string(self, xml):
source = ETSource(xml)
with source as src:
content = src.read()
if not IRONPYTHON:
content = content.decode('UTF-8')
assert_equal(content, xml)
self._verify_string_representation(source, '<in-memory file>')
assert_true(source._opened.closed)
with ETSource(xml) as src:
assert_equal(ET.parse(src).getroot().tag, 'tag')
def test_non_ascii_string_repr(self):
self._verify_string_representation(ETSource(u'\xe4'), u'\xe4')
def _verify_string_representation(self, source, expected):
assert_equal(unicode(source), expected)
assert_equal(u'-%s-' % source, '-%s-' % expected)
if __name__ == '__main__':
unittest.main()
|
kamsuri/vms
|
vms/organization/urls.py
|
Python
|
gpl-2.0
| 585
| 0.001709
|
# Django
from django.conf.urls import patterns, url
# local Django
from organization.views import OrganizationCreateView, OrganizationDeleteView, OrganizationListView, OrganizationUpdateView
urlpatterns = patterns(
'',
url(r'^create/$', OrganizationCreateView.as_view(), name='create'),
url(r'^delete/(?P<organization_id>\d+)$',
OrganizationDeleteView.as_view(),
name='delete'),
url(r'^edit/(?P<organization_id>\d+)$',
OrganizationUpdateView.as_view(),
name='edit'),
url(r'^list/$', OrganizationListView.as_view(), name='list'),
)
|
SheffieldML/GPy
|
doc/source/conf.py
|
Python
|
bsd-3-clause
| 12,464
| 0.007221
|
# -*- coding: utf-8 -*-
#
# GPy documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 18 18:16:28 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#for p in os.walk('../../GPy'):
# sys.path.append(p[0])
sys.path.insert(0, os.path.abspath('../../'))
sys.path.insert(0, os.path.abspath('../../GPy/'))
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
import sys
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = [
"GPy.util.linalg.linalg_cython",
"GPy.util.linalg_cython",
"sympy",
'GPy.kern.stationary_cython',
"sympy.utilities",
"sympy.utilities.lambdify",
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
#on_rtd = True
if on_rtd:
# sys.path.append(os.path.abspath('../GPy'))
import subprocess
# build extensions:
# proc = subprocess.Popen("cd ../../; python setup.py build_ext install", stdout=subprocess.PIPE, shell=True)
# (out, err) = proc.communicate()
# print("build_ext develop:")
# print(out)
# print current folder:
proc = subprocess.Popen("pwd", stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print("$ pwd: ")
print(out)
#Lets regenerate our rst files from the source, -P adds private modules (i.e kern._src)
proc = subprocess.Popen("sphinx-apidoc -M -P -f -o . ../../GPy", stdout=subprocess.PIPE, shell=True)
(out, err) = proc.communicate()
print("$ Apidoc:")
print(out)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.graphviz',
'sphinx.ext.inheritance_diagram',
]
#---sphinx.ext.inheritance_diagram config
inheritance_graph_attrs = dict(rankdir="LR", dpi=1200)
#----- Autodoc
#import sys
#try:
# from unittest.mock import MagicMock
#except:
# from mock import Mock as MagicMock
#
#class Mock(MagicMock):
# @classmethod
# def __getattr__(cls, name):
# return Mock()
#
#
#sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
#
import sphinx_rtd_theme
autodoc_default_flags = ['members',
#'undoc-members',
#'private-members',
#'special-members',
#'inherited-members',
'show-inheritance']
autodoc_member_order = 'groupwise'
add_function_parentheses = False
add_module_names = False
#modindex_common_prefix = ['GPy']
show_authors = True
# ------ Sphinx
# Add any paths that contain templates here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]#templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GPy'
#author = u'`Humans <https://github.com/SheffieldML/GPy/graphs/contributors>`_'
author = 'GPy Authors, see https://github.com/SheffieldML/GPy/graphs/contributors'
copyright = u'2020, '+author
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
with open('../../GPy/__version__.py', 'r') as f:
version = f.read()
release = version
print(version)
# version = '0.8.8'
# The full version, including alpha/beta/rc tags.
# release = '0.8.8'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'python'
# autodoc:
autoclass_content = 'both'
autodoc_default_flags = ['members',
#'undoc-members',
#'private-members',
#'special-members',
#'inherited-members',
'show-inheritance']
autodoc_member_order = 'groupwise'
add_function_parentheses = False
add_module_names = False
modindex_common_prefix = ['paramz']
show_authors = True
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = dict(sidebarwidth='20')
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'wide.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated
|
lioupayphone/gdeploy
|
gdeploylib/helpers.py
|
Python
|
gpl-2.0
| 21,630
| 0.001803
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Nandaja Varma <nvarma@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
#
# helpers.py
# ---------
# Helpers consists of a couple of general helper methods
# called from various parts of the framework to create directories
# and run commands
#
import os
import subprocess
import re
import sys
import itertools
import shutil
import argparse
import ConfigParser
try:
import yaml
except ImportError:
print "Error: Package PyYAML not found."
sys.exit(0)
from global_vars import Global
from yaml_writer import YamlWriter
from defaults import feature_list
class Helpers(Global, YamlWriter):
'''
Some helper methods to help in directory/file creation/removal etc.
'''
def is_present_in_yaml(self, filename, item):
if not os.path.isfile(filename):
return
doc = self.read_yaml(filename)
if doc and item in doc:
return True
return False
def get_value_from_yaml(self, filename, item):
doc = self.read_yaml(filename)
return doc.get(item)
def read_yaml(self, filename):
with open(filename, 'r') as f:
return yaml.load(f)
def cleanup_and_quit(self, ret=0):
if os.path.isdir(Global.base_dir) and not Global.keep:
shutil.rmtree(Global.base_dir)
Global.logger.info("Deleting playbook data %s"%Global.base_dir)
else:
print "\nYou can view the generated configuration files "\
"inside %s" % Global.base_dir
Global.logger.info("Configuration saved inside %s" %Global.base_dir)
Global.logger.info("Terminating gdeploy...")
sys.exit(ret)
def mk_dir(self, direc):
if os.path.isdir(direc):
shutil.rmtree(direc)
os.makedirs(direc)
def touch_file(self, filename):
try:
os.remove(filename)
except OSError:
pass
os.mknod(filename)
def copy_files(self, source_dir):
files = os.listdir(source_dir)
files_to_move = [self.get_file_dir_path(source_dir, f) for f in files]
for each in files_to_move:
try:
shutil.copy(each, Global.base_dir)
except IOError as e:
print "\nError: File copying failed(%s)" % e
self.cleanup_and_quit()
def get_file_dir_path(self, basedir, newdir):
return os.path.join(os.path.realpath(basedir), newdir)
def uppath(self, path, n):
# To get the n the parent of a particular directory
return os.sep.join(path.split(os.sep)[:-n])
def format_values(self, option_dict, excemption=''):
'''
This method will split the values provided in config by user,
when parsed as a dictionary
'''
for key, value in option_dict.iteritems():
'''
HACK: The value for option 'transport' can have comma in it,
eg: tcp,rdma. so comma here doesn't mean that it can have
multiple values. Hence the excemption argument
'''
if ',' in str(value) and key not in [excemption]:
option_dict[
key] = self.split_comma_separated_options(value)
return option_dict
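    # A hedged sketch, not part of the original file: given a parsed section such as
    #   {'bricks': '/gluster/b1,/gluster/b2', 'transport': 'tcp,rdma'}
    # format_values(section, excemption='transport') splits the comma separated bricks
    # into a list while leaving the exempted 'transport' value untouched; the keys and
    # values here are made up for illustration.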
def set_default_values(self, dictname, default_value_dict):
for key, value in default_value_dict.iteritems():
if key not in dictname:
dictname[key] = value
return dictname
def is_option_present(self, param, section_dict, reqd=True):
if not section_dict.get(param):
if reqd:
print "Error: %s not provided in the config. " \
"Cannot continue!" % param
                self.cleanup_and_quit()
return False
def split_comma_separated_options(self, options):
if options:
            pat_group = re.search("(.*){(.*)}(.*)", options)
if not pat_group:
return filter(None, options.split(','))
else:
result = []
for i in range(1,3):
if i == 2:
result[-1] += '{' + pat_group.group(2) + '}'
else:
result.extend(pat_group.group(i).split(','))
return self.pattern_stripping(result)
return []
def validate_hostname_volume_pattern(self, val):
val_group = re.search("(.*):(.*)", val)
if not val_group:
return False
return True
def get_hostnames(self):
hosts = self.config_get_options('hosts', False)
for host in hosts:
Global.hosts += self.parse_patterns(host)
self.remove_from_sections('hosts')
def get_var_file_type(self):
'''
Decides if host_vars are to be created or everything can
fit into the group_vars file based on the options provided
in the configuration file. If all the hostnames are
present as sections in the configuration file, assumes
we need host_vars. Fails accordingly.
'''
if set(Global.hosts).intersection(set(Global.sections)):
if set(Global.hosts).issubset(set(Global.sections)):
Global.var_file = 'host_vars'
else:
msg = "Looks like you missed to give configurations " \
"for one or many host(s). Exiting!"
print "\nError: " + msg
Global.logger.error(msg)
self.cleanup_and_quit()
return True
elif 'devices' in self.sections or 'brick_dirs' in self.sections:
Global.var_file = 'group_vars'
return True
else:
return False
def get_options(self, section, required=False):
if hasattr(Global, 'var_file') and Global.var_file:
if Global.var_file == 'group_vars':
return self.config_get_options(section, required)
else:
try:
options = Global.sections[self.current_host].get(section)
except:
print "\nError: Couldn't fin value for %s option for "\
"host %s" %(section, self.current_host)
return self.split_comma_separated_options(options)
return self.section_dict.get(section)
def split_volume_and_hostname(self, val):
'''
This gives the user the flexibility to not give the hosts
section. Instead one can just specify the volume name
with one of the peer member's hostname or IP in the
format <hostname>:<volumename>
'''
if val:
val_group = re.search("(.*):(.*)", val)
if val_group:
hostname = self.parse_patterns(val_group.group(1))
try:
Global.master = [hostname[0]]
except:
pass
return val_group.group(2)
return val
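    # A hedged sketch, not part of the original file: a value written as
    # '10.70.46.13:glustervol' (address and volume name made up) would return 'glustervol'
    # while the host part is parsed and remembered as the peer to drive commands from.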
def split_brickname_and_hostname(self, brick):
if not brick:
return None
brk_group = re.search("(.*):(.*)", brick)
if not brk_group:
print "\nError: Brick names should be in the format " \
"<hostname>:<brickname>. Exiting!"
self.cleanup_and_quit()
if brk_group.group(1) not in Global.brick_hosts:
Global.brick_hosts.append(brk_group.group(1))
re
|
e1528532/libelektra
|
src/bindings/gi/python/testgi_kdb.py
|
Python
|
bsd-3-clause
| 1,632
| 0.03125
|
import unittest
from gi.repository import GElektra as kdb
TEST_NS = "user/tests/gi_py3"
class Constants(unittest.TestCase):
def setUp(self):
pass
    def test_kdbconfig_h(self):
self.assertIsInstance(kdb.DB_SYSTEM, str)
self.assertIsInstance(kdb.DB_USER, str)
self.assertIsInstance(kdb.DB_HOME, str)
self.assertIsInstance(kdb.DEBUG, int)
def test_kdb_h(self):
self.assertIsInstance(kdb.VERSION, str)
        self.assertIsInstance(kdb.VERSION_MAJOR, int)
self.assertIsInstance(kdb.VERSION_MINOR, int)
self.assertIsInstance(kdb.VERSION_MICRO, int)
self.assertIsNone(kdb.KS_END)
class KDB(unittest.TestCase):
def test_ctor(self):
self.assertIsInstance(kdb.KDB(), kdb.KDB)
error = kdb.Key()
self.assertIsInstance(kdb.KDB(error), kdb.KDB)
def test_get(self):
with kdb.KDB() as db:
ks = kdb.KeySet()
db.get(ks, "system/elektra")
import os
if os.getenv("CHECK_VERSION") is None:
key = ks["system/elektra/version/constants/KDB_VERSION"]
self.assertEqual(key.value, kdb.VERSION)
def test_set(self):
with kdb.KDB() as db:
ks = kdb.KeySet(100)
db.get(ks, TEST_NS)
try:
key = ks[TEST_NS + "/mykey"]
except KeyError:
key = kdb.Key(TEST_NS + "/mykey")
ks.append(key)
key.value = "new_value"
db.set(ks, TEST_NS)
with kdb.KDB() as db:
ks = kdb.KeySet(100)
db.get(ks, TEST_NS)
self.assertEqual(ks[TEST_NS + "/mykey"].value, "new_value")
def tearDownClass():
# cleanup
with kdb.KDB() as db:
ks = kdb.KeySet(100)
db.get(ks, TEST_NS)
ks.cut(kdb.Key(TEST_NS))
db.set(ks, TEST_NS)
if __name__ == '__main__':
unittest.main()
|
otfried/cs101
|
code/files/krw1.py
|
Python
|
gpl-3.0
| 1,258
| 0.031797
|
#
# Read KRW-USD rates
#
# We have data from 1994 to 2009
years = range(1994, 2010)
# read one year into list data
def read_year(yr, data):
fname = "data/%d.txt" % yr
f = open(fname, "r")
for l in f:
date1, value1 = l.split()
value = float(value1)
# convert to KRW per USD
value = int(1.0 / value)
# convert YYYY/MM/DD string to int
ys, ms, ds = date1.split("/")
date = 10000 * int(ys) + 100 * int(ms) + int(ds)
data.append((date, value))
f.close()
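# A hedged worked example, not part of the original script: a data line such as
# "1997/11/21 0.00102" (made-up rate) becomes the pair (19971121, 980), i.e.
# 10000*1997 + 100*11 + 21 for the date and int(1.0/0.00102) for KRW per USD.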
# read all files and return list
def read_all():
data = []
for yr in years:
read_year(yr, data)
return data
# compute average exchange rate for yr
def average(data, yr):
    sum = 0
count = 0
start = yr * 10000
end = (yr + 1) * 10000
for d, v in data:
if start <= d < end:
sum += v
count += 1
return sum / count
def find_min(data):
vm = 99999
dm = None
for d, v in data:
if v < vm:
vm = v
dm = d
return dm, vm
def find_max(data):
vm = 0
dm = None
for d, v in data:
if v > vm:
vm = v
dm = d
return dm, vm
def main():
data = read_all()
print "Minimum:", find_min(data)
print "Maximum:", find_max(data)
for yr in years:
avg = average(data, yr)
print yr, avg
main()
|
sergecodd/FireFox-OS
|
B2G/gecko/ipc/ipdl/ipdl/cgen.py
|
Python
|
apache-2.0
| 3,280
| 0.007927
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os, sys
from ipdl.ast import Visitor
from ipdl.ast import IN, OUT, INOUT, ASYNC, SYNC, RPC
class CodePrinter:
def __init__(self, outf=sys.stdout, indentCols=4):
self.outf = outf
self.col = 0
self.indentCols = indentCols
def write(self, str):
self.outf.write(str)
def printdent(self, str=''):
self.write((' '* self.col) + str)
def println(self, str=''):
self.write(str +'\n')
def printdentln(self, str):
self.write((' '* self.col) + str +'\n')
def indent(self): self.col += self.indentCols
def dedent(self): self.col -= self.indentCols
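# A hedged worked example, not part of the original file: CodePrinter tracks a running
# indentation column, so
#   p = CodePrinter()
#   p.printdentln('foo {'); p.indent(); p.printdentln('bar;'); p.dedent(); p.printdentln('}')
# writes "foo {", "    bar;" and "}" (the default indent step is four columns).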
##-----------------------------------------------------------------------------
class IPDLCodeGen(CodePrinter, Visitor):
'''Spits back out equivalent IPDL to the code that generated this.
Also known as pretty-printing.'''
def __init__(self, outf=sys.stdout, indentCols=4, printed=set()):
CodePrinter.__init__(self, outf, indentCols)
self.printed = printed
def visitTranslationUnit(self, tu):
self.printed.add(tu.filename)
self.println('//\n// Automatically generated by ipdlc\n//')
CodeGen.visitTranslationUnit(self, tu)
def visitCxxInclude(self, inc):
self.println('include "'+ inc.file +'";')
def visitProtocolInclude(self, inc):
self.println('include protocol "'+ inc.file +'";')
if inc.tu.filename not in self.printed:
self.println('/* Included file:')
IPDLCodeGen(outf=self.outf, indentCols=self.indentCols,
printed=self.printed).visitTranslationUnit(inc.tu)
self.println('*/')
def visitProtocol(self, p):
self.println()
for namespace in p.namespaces: namespace.accept(self)
self.println('%s protocol %s\n{'% (p.sendSemantics[0], p.name))
self.indent()
for mgs in p.managesStmts:
mgs.accept(self)
if len(p.managesStmts): self.println()
for msgDecl in p.messageDecls: msgDecl.accept(self)
self.println()
for transStmt in p.transitionStmts: transStmt.accept(self)
self.dedent()
self.println('}')
self.write('}\n'* len(p.namespaces))
def visitManagerStmt(self, mgr):
self.printdentln('manager '+ mgr.name +';')
def visitManagesStmt(self, mgs):
self.printdentln('manages '+ mgs.name +';')
def visitMessageDecl(self, msg):
self.printdent('%s %s %s('% (msg.sendSemantics[0], msg.direction[0], msg.name))
for i, inp in enumerate(msg.inParams):
inp.accept(self)
if i != (len(msg.inParams) - 1): self.write(', ')
self.write(')')
if 0 == len(msg.outParams):
self.println(';')
return
self.println()
self.indent()
self.printdent('returns (')
for i, outp in enumerate(msg.outParams):
outp.accept(self)
if i != (len(msg.outParams) - 1): self.write(', ')
self.println(');')
self.dedent()
|
mmadsen/axelrod-ct
|
madsenlab/axelrod/utils/convergence.py
|
Python
|
apache-2.0
| 839
| 0.004768
|
#!/usr/bin/env python
# Copyright (c) 2013. Mark E. Madsen <mark@madsenlab.org>
#
# This work is licensed under the terms of the Apache Software License, Version 2.0. See the file LICENSE for details.
"""
Description here
"""
import logging as log
def check_liveness(ax, model, args, simconfig, timestep):
diff = timestep - model.get_time_last_interaction()
num_links = model.agentgraph.number_of_edges()
if (diff > (5 * num_links)):
        #log.debug("No interactions have occurred since %s - for %s ticks, which is 5 * %s network edges", model.get_time_last_interaction(), diff, num_links)
if ax.get_fraction_links_active() == 0.0:
log.debug("No active links found in the model, clear to finalize")
return False
else:
return True
else:
return True
|
dhardtke/foostat
|
Daemon.py
|
Python
|
mit
| 5,006
| 0.001798
|
#!/usr/bin/env python3
import sys
import os
import time
import atexit
from signal import SIGTERM
import logging
import logging.handlers
# load the config
from foostat import config
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
filehandler = logging.handlers.TimedRotatingFileHandler(config["files"]["log_file"], when='midnight', interval=1,
backupCount=10)
filehandler.setFormatter(logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S'))
logger.addHandler(filehandler)
class Daemon(object):
"""
Subclass Daemon class and override the run() method.
"""
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
Deamonize, do double-fork magic.
"""
try:
pid = os.fork()
if pid > 0:
# Exit first parent.
sys.exit(0)
except OSError as e:
message = "Fork #1 failed: {}\n".format(e)
sys.stderr.write(message)
sys.exit(1)
# Decouple from parent environment.
os.chdir("/")
os.setsid()
os.umask(0)
# Do second fork.
try:
pid = os.fork()
if pid > 0:
# Exit from second parent.
sys.exit(0)
except OSError as e:
message = "Fork #2 failed: {}\n".format(e)
            sys.stderr.write(message)
            sys.exit(1)
        logger.info('daemon going to background, PID: {}'.format(os.getpid()))
# Redirect standard file descriptors.
sys.stdout.flush()
sys.stderr.flush()
si = open(self.stdin, 'r')
so = open(self.stdout, 'a+')
se = open(self.stderr, 'a+')
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# fix current dir to script's dir
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Write pidfile.
pid = str(os.getpid())
open(self.pidfile, 'w+').write("{}\n".format(pid))
# Register a function to clean up.
atexit.register(self.delpid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start daemon.
"""
# Check pidfile to see if the daemon already runs.
try:
pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "Pidfile {} already exist. Daemon already running?\n".format(self.pidfile)
sys.stderr.write(message)
|
sys.exit(1)
# Start daemon.
self.daemonize()
self.run()
def status(self):
"""
Get status of daemon.
"""
try:
pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.c
|
lose()
except IOError:
message = "There is no PID file. Daemon already running?\n"
sys.stderr.write(message)
sys.exit(1)
try:
procfile = open("/proc/{}/status".format(pid), 'r')
procfile.close()
message = "There is a process with the PID {}\n".format(pid)
sys.stdout.write(message)
except IOError:
message = "There is not a process with the PID {}\n".format(self.pidfile)
sys.stdout.write(message)
def stop(self):
"""
Stop the daemon.
"""
# Get the pid from pidfile.
try:
pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError as e:
message = str(e) + "\nDaemon not running?\n"
sys.stderr.write(message)
sys.exit(1)
# Try killing daemon process.
try:
os.kill(pid, SIGTERM)
time.sleep(1)
except OSError as e:
print(str(e))
sys.exit(1)
try:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
except IOError as e:
message = str(e) + "\nCan not remove pid file {}".format(self.pidfile)
sys.stderr.write(message)
sys.exit(1)
def restart(self):
"""
Restart daemon.
"""
self.stop()
time.sleep(1)
self.start()
def run(self):
"""
You should override this method when you subclass Daemon.
It will be called after the process has been daemonized by start() or restart().
Example:
class MyDaemon(Daemon):
def run(self):
while True:
time.sleep(1)
"""
|
Shadi-A/ExpediaCrawler
|
expediacrawler/soupParser.py
|
Python
|
mit
| 2,286
| 0.002625
|
from bs4 import BeautifulSoup
import time
from selenium import webdriver
from pyvirtualdisplay import Display
__PROCESSOR = 'lxml'
__OFFERS_CLASS = 'offer-listing'
__PRICE_CLASS = 'dollars'
__FLIGHT_DURATION_CLASS = 'duration-emphasis'
__LAYOVR_CLASS = ''
__AIRLINE_CLASS = ''
def get_page_offers(url):
display = Display(visible=0, size=(800, 600))
display.start()
browser = webdriver.Firefox()
ffResults = browser.get(ur
|
l)
time.sleep(25)
full_content = browser.execute_script("return document.getElementsByTagName('html')[0].innerHTML")
browser.quit()
display.stop()
return __parse_offers_page(full_content)
def __parse_offers_page(html_content):
offers_list = __parse_offers_list(html_content)
prices_list = []
for offer in offers_list:
offer_object = __get_offer_object(offer)
if offer_object is not None:
prices_list.append(offer_object)
return pric
|
es_list
def __parse_offers_list(html_content):
soup = BeautifulSoup(html_content, __PROCESSOR)
offers = soup.find_all('li', class_=__OFFERS_CLASS)
return offers
def __get_offer_object(offer_html):
offer_price = __get_offer_price(offer_html)
offer_duration = __get_offer_duration(offer_html)
offer_airline = __get_offer_airline(offer_html)
if offer_price is not None and offer_duration is not None and offer_airline is not None:
return {'price': offer_price.strip(), 'duration': offer_duration.strip(), 'airline': offer_airline.strip()}
def __get_offer_price(offer_html):
offer_element = __find_element_using_class(offer_html, 'span', __PRICE_CLASS)
if offer_element is not None:
return __find_element_using_class(offer_html, 'span', __PRICE_CLASS).text
def __get_offer_duration(offer_html):
return __find_element_using_class(offer_html, 'div', __FLIGHT_DURATION_CLASS).text
def __find_elements_using_class(html_content, element, css_class):
soup = BeautifulSoup(html_content, __PROCESSOR)
return soup.find_all(element, class_=css_class)
def __find_element_using_class(html_content, element, css_class):
return html_content.find(element, class_=css_class)
def __get_offer_airline(offer_html):
return offer_html.find('div', {'data-test-id': 'airline-name'}).text
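# Hedged self-test (added for illustration): the markup below is made up and merely
# mirrors the CSS classes and data-test-id used by the constants above; it is not
# real Expedia HTML. It exercises the parsing path without Selenium or a browser.
def _demo_parse():
    sample = (
        "<ul>"
        "<li class='offer-listing'>"
        "<span class='dollars'>123</span>"
        "<div class='duration-emphasis'>7h 30m</div>"
        "<div data-test-id='airline-name'>Example Air</div>"
        "</li>"
        "</ul>"
    )
    # expected: [{'price': '123', 'duration': '7h 30m', 'airline': 'Example Air'}]
    return __parse_offers_page(sample)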
|
tvenkat/askbot-devel
|
askbot/search/postgresql/__init__.py
|
Python
|
gpl-3.0
| 723
| 0.004149
|
"""Procedures to initialize the full text search in PostgresQL"""
from django.db import connection
def setup_full_text_search(script_path):
"""using postgresql database connection,
installs
|
the plsql language, if necessary
    and runs the script, whose path is given as an argument
"""
fts_init_query = open(script_path).read()
cursor = connection.cursor()
try:
#test if language exists
cursor.execute("SELECT * FROM pg
|
_language WHERE lanname='plpgsql'")
lang_exists = cursor.fetchone()
if not lang_exists:
cursor.execute("CREATE LANGUAGE plpgsql")
#run the main query
cursor.execute(fts_init_query)
finally:
cursor.close()
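# Hedged sketch (illustrative, not part of askbot): the function above could be wrapped
# in a Django management command living under management/commands/, roughly:
#
# from django.core.management.base import BaseCommand
#
# class Command(BaseCommand):
#     help = 'Initialize PostgreSQL full text search from an SQL script'
#
#     def add_arguments(self, parser):
#         parser.add_argument('script_path')
#
#     def handle(self, *args, **options):
#         setup_full_text_search(options['script_path'])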
|
i3visio/osrframework
|
osrframework/wrappers/pending/cloudflare/forocoches.py
|
Python
|
agpl-3.0
| 4,071
| 0.004914
|
################################################################################
#
# Copyright 2015-2020 Félix Brezo and Yaiza Rubio
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
__author__ = "Felix Brezo, Yaiza Rubio <contacto@i3visio.com>"
__version__ = "2.0"
from osrframework.utils.platforms import Platform
class Forocoches(Platform):
"""A <Platform> object for Forocoches"""
def __init__(self):
self.platformName = "Forocoches"
self.tags = ["opinions", "activism"]
# Add the URL for enumeration below
#self.urlEnumeration = "http://www.forocoches.com/foro/member.php?u=" + "<HERE_GOES_THE_USER_ID>"
########################
# Defining valid modes #
########################
self.isValidMode = {}
self.isValidMode["phonefy"] = False
self.isValidMode["usufy"] = True
self.isValidMode["searchfy"] = False
######################################
# Search URL for the different modes #
######################################
# Strings with the URL for each and every mode
self.url = {}
#self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>"
self.url["usufy"] = "http://www.forocoches.com/foro/member.php?username=" + "<usufy>"
#self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>"
######################################
# Whether the user needs credentials #
######################################
self.needsCredentials = {}
#self.needsCredentials["phonefy"] = False
self.needsCredentials["usufy"] = False
#self.needsCredentials["searchfy"] = False
#################
# Valid queries #
#################
        # Regular expressions that a query must match to be considered valid
self.validQuery = {}
# The regular expression '.+' will match any query.
#self.validQuery["phonefy"] = ".*"
self.validQuery["usufy"] = ".+"
#self.validQuery["searchfy"] = ".*"
###################
# Not_found clues #
###################
# Strings that will imply that the query number is not appearing
self.notFoundText = {}
#self.notFoundText["phonefy"] = []
self.notFoundText["usufy"] = ["main error message"]
#self.notFoundText["searchfy"] = []
#########################
# Fields to be searched #
#########################
self.fieldsRegExp = {}
# Definition of regular expressions to be searched in phonefy mode
#self.fieldsRegExp["phonefy"] = {}
# Example of fields:
#self.fieldsRegExp["phonefy"]["i3visio.location"] = ""
# Definition of regula
|
r expressions to be searched in usufy mode
self.fieldsRegExp["usufy"] = {}
# Example of fields:
#self.fieldsRegExp["usufy"]["i3visio.location"] = ""
# Definition of regular expressions to be searched in searchfy mode
#self.fieldsRegExp["searchfy"] = {}
# Example of
|
fields:
#self.fieldsRegExp["searchfy"]["i3visio.location"] = ""
################
# Fields found #
################
# This attribute will be feeded when running the program.
self.foundFields = {}
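# Hedged usage sketch (illustrative; only the URL template above comes from the class,
# the helper name and the example nickname are made up):
def _build_usufy_url(platform, nick):
    """Return the profile URL that would be checked for a given nickname."""
    return platform.url["usufy"].replace("<usufy>", nick)

# _build_usufy_url(Forocoches(), "example_user")
# -> 'http://www.forocoches.com/foro/member.php?username=example_user'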
|
shivan1b/pydelhi_mobile
|
pydelhiconf/libs/garden/garden.mapview/mapview/clustered_marker_layer.py
|
Python
|
agpl-3.0
| 13,896
| 0.001007
|
# coding=utf-8
"""
Layer that support point clustering
===================================
"""
from os.path import dirname, join
from math import sin, log, pi, atan, exp, floor, sqrt
from mapview.view import MapLayer, MapMarker
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.properties import (ObjectProperty, NumericProperty, StringProperty, ListProperty)
Builder.load_string("""
<ClusterMapMarker>:
size_hint: None, None
source: root.source
size: list(map(dp, self.texture_size))
allow_stretch: True
Label:
color: root.text_color
pos: root.pos
size: root.size
text: "{}".format(root.num_points)
font_size: dp(18)
""")
# longitude/latitude to spherical mercator in [0..1] range
def lngX(lng):
return lng / 360. + 0.5
def latY(lat):
if lat == 90:
return 0
if lat == -90:
return 1
s = sin(lat * pi / 180.)
y = (0.5 - 0.25 * log((1 + s) / (1 - s)) / pi)
return min(1, max(0, y))
# spherical mercator to longitude/latitude
def xLng(x):
return (x - 0.5) * 360
def yLat(y):
y2 = (180 - y * 360) * pi / 180
return 360 * atan(exp(y2)) / pi - 90
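# Hedged round-trip check (added for illustration): the four projection helpers above
# are exact inverses of each other, up to floating point error.
def _projection_roundtrip_ok(lon=-122.4, lat=37.8, tol=1e-6):
    x, y = lngX(lon), latY(lat)
    return abs(xLng(x) - lon) < tol and abs(yLat(y) - lat) < tol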
class KDBush(object):
# kdbush implementation from https://github.com/mourner/kdbush/blob/master/src/kdbush.js
#
def __init__(self, points, node_size=64):
super(KDBush, self).__init__()
self.points = points
self.node_size = node_size
self.ids = ids = [0] * len(points)
self.coords = coords = [0] * len(points) * 2
for i, point in enumerate(points):
ids[i] = i
coords[2 * i] = point.x
coords[2 * i + 1] = point.y
self._sort(ids, coords, node_size, 0, len(ids) - 1, 0)
def range(self, min_x, min_y, max_x, max_y):
return self._range(self.ids, self.coords, min_x, min_y, max_x, max_y,
self.node_size)
def within(self, x, y, r):
return self._within(self.ids, self.coords, x, y, r, self.node_size)
def _sort(self, ids, coords, node_size, left, right, depth):
if right - left <= node_size:
return
m = int(floor((left + right) / 2.))
self._select(ids, coords, m, left, right, depth % 2)
self._sort(ids, coords, node_size, left, m - 1, depth + 1)
self._sort(ids, coords, node_size, m + 1, right, depth + 1)
def _select(self, ids, coords, k, left, right, inc):
swap_item = self._swap_item
while right > left:
if (right - left) > 600:
n = float(right - left + 1)
m = k - left + 1
z = log(n)
                s = 0.5 * exp(2 * z / 3.)
sd = 0.5 * sqrt(z * s * (n - s) / n) * (-1
if (m - n / 2.) < 0 else 1)
new_left = max(left, int(floor(k - m * s / n + sd)))
new_right = min(right, int(floor(k + (n - m) * s / n + sd)))
self._select(ids, coords, k, new_left, new_right, inc)
t = coords[2 * k + inc]
i = left
j = right
swap_item(ids, coords, left, k)
if coords[2 * right + inc] > t:
swap_item(ids, coords, left, right)
while i < j:
swap_item(ids, coords, i, j)
i += 1
j -= 1
while coords[2 * i + inc] < t:
i += 1
while coords[2 * j + inc] > t:
j -= 1
if coords[2 * left + inc] == t:
swap_item(ids, coords, left, j)
else:
j += 1
swap_item(ids, coords, j, right)
if j <= k:
left = j + 1
if k <= j:
right = j - 1
def _swap_item(self, ids, coords, i, j):
swap = self._swap
swap(ids, i, j)
swap(coords, 2 * i, 2 * j)
swap(coords, 2 * i + 1, 2 * j + 1)
def _swap(self, arr, i, j):
tmp = arr[i]
arr[i] = arr[j]
arr[j] = tmp
def _range(self, ids, coords, min_x, min_y, max_x, max_y, node_size):
stack = [0, len(ids) - 1, 0]
result = []
x = y = 0
while stack:
axis = stack.pop()
|
right = stack.pop()
left = stack.pop()
if right - left <= node_size:
for i in range(left, right + 1):
x = coords[2 * i]
y = coords[2 * i + 1
|
]
if (x >= min_x and x <= max_x and y >= min_y and
y <= max_y):
result.append(ids[i])
continue
m = int(floor((left + right) / 2.))
x = coords[2 * m]
y = coords[2 * m + 1]
if (x >= min_x and x <= max_x and y >= min_y and y <= max_y):
result.append(ids[m])
nextAxis = (axis + 1) % 2
if (min_x <= x if axis == 0 else min_y <= y):
stack.append(left)
stack.append(m - 1)
stack.append(nextAxis)
if (max_x >= x if axis == 0 else max_y >= y):
stack.append(m + 1)
stack.append(right)
stack.append(nextAxis)
return result
def _within(self, ids, coords, qx, qy, r, node_size):
sq_dist = self._sq_dist
stack = [0, len(ids) - 1, 0]
result = []
r2 = r * r
while stack:
axis = stack.pop()
right = stack.pop()
left = stack.pop()
if right - left <= node_size:
for i in range(left, right + 1):
if sq_dist(coords[2 * i], coords[2 * i + 1], qx, qy) <= r2:
result.append(ids[i])
continue
m = int(floor((left + right) / 2.))
x = coords[2 * m]
y = coords[2 * m + 1]
if sq_dist(x, y, qx, qy) <= r2:
result.append(ids[m])
nextAxis = (axis + 1) % 2
if (qx - r <= x) if axis == 0 else (qy - r <= y):
stack.append(left)
stack.append(m - 1)
stack.append(nextAxis)
if (qx + r >= x) if axis == 0 else (qy + r >= y):
stack.append(m + 1)
stack.append(right)
stack.append(nextAxis)
return result
def _sq_dist(self, ax, ay, bx, by):
dx = ax - bx
dy = ay - by
return dx * dx + dy * dy
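# Hedged usage sketch (illustrative): indexing a handful of points and querying them.
# The _Point stand-in only needs .x/.y attributes, which is all KDBush reads.
class _Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

def _kdbush_demo():
    pts = [_Point(0.1, 0.1), _Point(0.2, 0.2), _Point(0.8, 0.9)]
    index = KDBush(pts, node_size=64)
    in_box = index.range(0.0, 0.0, 0.5, 0.5)  # ids of points inside the box -> [0, 1]
    near = index.within(0.15, 0.15, 0.1)      # ids within radius 0.1 of (0.15, 0.15) -> [0, 1]
    return in_box, near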
class Cluster(object):
def __init__(self, x, y, num_points, id, props):
super(Cluster, self).__init__()
self.x = x
self.y = y
self.num_points = num_points
self.zoom = float("inf")
self.id = id
self.props = props
self.parent_id = None
self.widget = None
# preprocess lon/lat
self.lon = xLng(x)
self.lat = yLat(y)
class Marker(object):
def __init__(self, lon, lat, cls=MapMarker, options=None):
super(Marker, self).__init__()
self.lon = lon
self.lat = lat
self.cls = cls
self.options = options
# preprocess x/y from lon/lat
self.x = lngX(lon)
self.y = latY(lat)
# cluster information
self.id = None
self.zoom = float("inf")
self.parent_id = None
self.widget = None
def __repr__(self):
return "<Marker lon={} lat={} source={}>".format(self.lon, self.lat,
self.source)
class SuperCluster(object):
"""Port of supercluster from mapbox in pure python
"""
def __init__(self,
min_zoom=0,
max_zoom=16,
radius=40,
extent=512,
node_size=64):
super(SuperCluster, self).__init__()
self.min_zoom = min_zoom
self.max_zoom = max_zoom
self.radius = radius
self.extent = extent
self.node_size = node_size
def load(self, points):
"""Load an array o
|
appsembler/mayan_appsembler
|
fabfile/conf.py
|
Python
|
gpl-3.0
| 2,675
| 0.00972
|
import os
import string
import random
from fabric.api import env
from fabric.colors import green
from literals import (DEFAULT_INSTALL_PATH, DEFAULT_VIRTUALENV_NAME,
DEFAULT_REPOSITORY_NAME, DEFAULT_OS, OS_CHOICES,
DEFAULT_DATABASE_MANAGER, DB_CHOICES, DEFAULT_DATABASE_NAME,
DEFAULT_WEBSERVER, WEB_CHOICES, DEFAULT_DATABASE_USERNAME,
DJANGO_DB_DRIVERS, DEFAULT_DATABASE_HOST, DEFAULT_PASSWORD_LENGTH)
from server_config import reduce_env
def password_generator():
# http://snipplr.com/view/63223/python-password-generator/
chars = string.ascii_letters + string.digits
return ''.join(random.choice(chars) for x in range(DEFAULT_PASSWORD_LENGTH))
@reduce_env
def setup_environment():
env['os'] = getattr(env, 'os', DEFAULT_OS)
env['os_name'] = OS_CHOICES[env.os]
env['install_path'] = getattr(env, 'install_path', DEFAULT_INSTALL_PATH[env.os])
env['virtualenv_name'] = getattr(env, 'virtualenv_name', DEFAULT_VIRTUALENV_NAME[env.os])
env['repository_name'] = getattr(env, 'repository_name', DEFAULT_REPOSITORY_NAME[env.os])
env['virtualenv_path'] = os.path.join(env.install_path, env.virtualenv_name)
env['repository_path'] = os.path.join(env.virtualenv_path, env.repository_name)
env['database_manager'] = getattr(env, 'database_manager', DEFAULT_DATABASE_MANAGER)
env['database_manager_name'] = DB_CHOICES[env.database_manager]
env['database_username'] = getattr(env, 'database_username', DEFAULT_DATABASE_USERNAME)
env['database_password'] = getattr(env, 'database_password', password_generator())
env['database_host'] = getattr(env, 'database_host', DEFAULT_DATABASE_HOST)
env['drop_database'] = getattr(env, 'drop_database', False)
if not getattr(env, 'database_manager_admin_password', None):
print('Must set the database_manager_admin_password entry in the fabric settings file (~/.fabricrc by default)')
exit(1)
env['database_name'] = getattr(env, 'database_name', DEFAULT_DATABASE_NAME)
env['webserver'] = getattr(env, 'webserver', DEFAULT_WEBSERVER)
env['webserver_name'] = WEB_CHOICES[env.webserver]
env['django_database_d
|
river'] = DJANGO_DB_DRIVERS[env.database_manager]
def print_supported_configs():
|
print('Supported operating systems (os=): %s, default=\'%s\'' % (dict(OS_CHOICES).keys(), green(DEFAULT_OS)))
print('Supported database managers (database_manager=): %s, default=\'%s\'' % (dict(DB_CHOICES).keys(), green(DEFAULT_DATABASE_MANAGER)))
print('Supported webservers (webserver=): %s, default=\'%s\'' % (dict(WEB_CHOICES).keys(), green(DEFAULT_WEBSERVER)))
print('\n')
|
datamade/scrapers_ca_app
|
scrapers_ca_app/urls.py
|
Python
|
mit
| 423
| 0.007092
|
from d
|
jango.conf.urls import patterns, url, include
urlpatterns = patterns('',
('', include('imago.urls')),
url(r'^report/(?P<module_name>[a-z0-9_]+)/$', 'reports.views.report', name='report'),
url(r'^represent/(?P<modu
|
le_name>[a-z0-9_]+)/$', 'reports.views.represent', name='represent'),
url(r'^warnings/$', 'reports.views.warnings', name='warnings'),
url(r'^$', 'reports.views.home', name='home'),
)
|
amexperts/bounty
|
projects/views.py
|
Python
|
gpl-3.0
| 1,079
| 0.007414
|
from django.http import Http404
from django.shortcuts import render
from . import models
def index(request):
# Generate counts of some of the main ob
|
jects
num_projects = models.Project.objects.all().count()
num_tasks = models.Tasks.objects.all().count()
num_fundings = models.Fundings.objects.all().count()
    num_projects_goal = (models.Project.objects.all().count() / 10) * 100
projects = models.Project.objects.all()[:4]
featured_projects = models.Project.objects.all().filter(feature
|
d=1)
# Render the HTML template
return render(
request,
'home.html',
context={'projects': projects, 'featured': featured_projects, 'num_projects':num_projects, 'num_tasks':num_tasks, 'num_fundings':num_fundings, 'num_projects_goal':num_projects_goal},
)
def project(request, slug):
try:
p = models.Project.objects.get(slug=slug)
except models.Project.DoesNotExist:
raise Http404("Project does not exist")
return render(
request,
'sample.html',
context={'project': p}
)
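# Hedged wiring sketch (illustrative; the module path and URL names below are assumed,
# not taken from the project):
# from django.urls import path
# from projects import views
#
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('projects/<slug:slug>/', views.project, name='project'),
# ]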
|
jay4ek/cs3240-labdemo
|
hello.py
|
Python
|
mit
| 75
| 0.026667
|
f
|
rom helper import greeting
if "__name__" == "__main__":
|
greeting('hello')
|
rschnapka/purchase-workflow
|
framework_agreement/tests/test_framework_agreement_price_list.py
|
Python
|
agpl-3.0
| 4,066
| 0
|
from datetime import timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp.osv import orm
import openerp.tests.common as test_common
from .common import BaseAgreementTestMixin
class TestAgreementPriceList(test_common.TransactionCase,
BaseAgreementTestMixin):
"""Test observer on change and purchase order on chnage"""
def setUp(self):
""" Create a default agreement
        with 4 price lines
qty 0 price 70
qty 200 price 60
qty 500 price 50
qty 1000 price 45
"""
super(TestAgreementPriceList, self).setUp()
self.commonsetUp()
cr, uid = self.cr, self.uid
start_date = self.now + timedelta(days=10)
start_date = start_date.strftime(DEFAULT_SERVER_DATE_FORMAT)
end_date = self.now + timedelta(days=20)
end_date = end_date.strftime(DEFAULT_SERVER_DATE_FORMAT)
agr_id = self.agreement_model.create(cr, uid,
{'supplier_id': self.supplier_id,
'product_id': self.product_id,
'start_date': start_date,
'end_date': end_date,
'delay': 5,
'draft': False,
'quantity': 1500})
pl_id = self.agreement_pl_model.create(cr, uid, {
'framework_agreement_id': agr_id,
'currency_id': self.ref('base.EUR')})
self.agreement_line_model.create(cr, uid, {
'framework_agreement_pricelist_id': pl_id,
'quantity': 0,
'price': 70.0})
self.agreement_line_model.create(cr, uid, {
'framework_agreement_pricelist_id': pl_id,
'quantity': 200,
'price': 60.0})
self.agreement_line_model.create(cr, uid, {
'framework_agreement_pricelist_id': pl_id,
'quantity': 500,
'price': 50.0})
self.agreement_line_model.create(cr, uid, {
'framework_agreement_pricelist_id': pl_id,
'quantity': 1000,
'price': 45.0})
self.agreement = self.agreement_model.browse(cr, uid, agr_id)
def test_00_test_qty(self):
"""Test if barem retrieval is correct"""
self.assertEqual(
self.agreement.get_price(0, currency=self.browse_ref('base.EUR')),
70.0)
self.assertEqual(
self.agreement.get_price(
100, currency=self.browse_ref('base.EUR')
), 70.0)
self.assertEqual(
self.agreement.get_price(
200, currency=self.browse_ref('base.EUR')
), 60.0)
self.assertEqual(
self.agreement.get_price(
210, currency=self.browse_ref('base.EUR')
), 60.0)
self.assertEqual(
self.agreement.get_price(
500, currency=self.browse_ref('base.EUR')
), 50.0)
self.assertEqual(
self.agreement.get_price(
800, currency=self.browse_ref('base.EUR')
), 50.0)
self.assertEqual(
s
|
elf.agreement.get_price(
999, currency=self.browse_ref('base.EUR')
), 50.0)
self.assertEqual(
self.agreement.get_price(
1000, currency=self.browse_ref('base.EUR')
), 45.0)
self.assertEqual(
self.agreement.get_price(
|
10000, currency=self.browse_ref('base.EUR')
), 45.0)
self.assertEqual(
self.agreement.get_price(
-10, currency=self.browse_ref('base.EUR')
), 70.0)
def test_01_failed_wrong_currency(self):
"""Tests that wrong currency raise an exception"""
with self.assertRaises(orm.except_orm):
self.agreement.get_price(0, currency=self.browse_ref('base.USD'))
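# Hedged illustration (not part of the addon): the behaviour exercised above is a simple
# "largest threshold <= qty" lookup; negative quantities fall back to the first tier,
# exactly as test_00 expects. The tier values mirror those created in setUp().
def _tiered_price(qty, tiers=((0, 70.0), (200, 60.0), (500, 50.0), (1000, 45.0))):
    price = tiers[0][1]
    for threshold, tier_price in tiers:
        if qty >= threshold:
            price = tier_price
    return price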
|
fatrix/django-golive
|
golive/layers/queue.py
|
Python
|
bsd-2-clause
| 2,683
| 0.002609
|
from fabric.context_managers import settings
from golive.layers.base import BaseTask, DebianPackageMixin, IPTablesSetup
from golive.stacks.stack import environment
from golive.utils import get_remote_envvar
class RabbitMqSetup(BaseTask, DebianPackageMixin):
package_name = "rabbitmq-server"
GUEST_USER = "guest"
RABBITMQ_CONFIGFILE = "/etc/rabbitmq/rabbitmq.config"
RABBIT_INITSCRIPT = "/etc/init.d/rabbitmq-server"
NAME = "RABBITMQ"
VAR_BROKER_USER = "GOLIVE_BROKER_USER"
VAR_BROKER_PASSWORD = "GOLIVE_BROKER_PASSWORD"
ROLE = "QUEUE_HOST"
def init(self, update=True):
# add repo for rabbitmq
self._add_repo()
self.sudo("apt-get update")
DebianPackageMixin.init(self, update)
self._set_listen_port()
allow = [
(environment.hosts, IPTablesSetup.DESTINATION_ALL, "9101:9105"),
(environment.hosts, IPTablesSetup.DESTINATION_ALL, "4369"),
(environment.hosts, IPTablesSetup.DESTINATION_ALL, "8612"),
(environment.hosts, IPTablesSetup.DESTINATION_ALL, "5672"),
]
iptables = IPTablesSetup()
iptables.prepare_rules(allow)
iptables.set_rules(self.__class__.__name__)
iptables.activate()
self._delete_user(self.__class__.GUEST_USER)
def deploy(self):
self._create_user()
def status(self):
out = self.run("sudo %s status" % self.RABBIT_INITSCRIPT)
self._check_output(out, "running_applications", self.NAME)
def _set_listen_port(self):
self.append(self.__class__.RABBITMQ_CONFIGFILE,
"[{kernel, [ {inet_dist_listen_min, 9100}, {inet_dist_listen_max, 9105} ]}].")
def _add_repo(self):
# as described at http://www.rabbitmq.com/install-debian.html
self.append("/etc/apt/sources.list", "deb http://www.rabbitmq.com/debian/ testing main")
self.sudo("wget http://www.rabbitmq.com/rabbitmq-signing-key-public.asc")
self.sudo("apt-key add rabbitmq-signing-key-public.asc")
def _create_user(self):
username = get_remote_envvar(self.VAR_BROKER_USER, environment.get_r
|
ole(self.ROLE).hosts[0])
password = get_remote_envvar(self.VAR_BROKER_PASSWORD, environment.get_role(self.ROLE).hosts[0])
with settings(warn_only=True):
self.sudo("rabbitmqctl add_user %s %s" % (username, password))
self.sudo("rabbitmqctl set_permissions -p / %s \".*\
|
" \".*\" \".*\"" % username)
# TODO: create vhost
def _delete_user(self, username):
with settings(warn_only=True):
self.sudo("rabbitmqctl delete_user %s" % username)
|
mvaled/sentry
|
src/sentry/interfaces/debug_meta.py
|
Python
|
bsd-3-clause
| 1,172
| 0
|
from __future__ import absolute_import
__all__ = ("DebugMeta",)
from sentry.interfaces.base import Interface
from sentry.utils.json import prune_empty_keys
class DebugMeta(Interface):
"""
Holds debug meta information for processing stacktraces
and similar things. This information is deleted after event processing.
Currently two attributes exist:
``sdk_info``:
sets the SDK that is used for the system. This affects the lookup
for system symbols. If not defined, system symbols are not looked up.
``images``:
a list of debug images and their mappings.
"""
ephemeral = False
path = "debug_meta"
external_type = "debugmeta"
@classmethod
def to_python(cls, data):
return cls(
images=data.get("imag
|
es", None) or [],
sdk_info=data.get("sdk_info"),
is_debug_build=data.get("is_debug_build"),
)
def to_json(self):
return prune_empty_keys(
|
{
"images": self.images or None,
"sdk_info": self.sdk_info or None,
"is_debug_build": self.is_debug_build,
}
)
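# Hedged usage sketch (illustrative payload only; real events carry richer image dicts):
# meta = DebugMeta.to_python({"images": [{"type": "macho", "debug_id": "..."}]})
# meta.to_json()  # empty keys (sdk_info, is_debug_build) are pruned by prune_empty_keys()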
|
jason-weirather/IDP
|
bin/selectrow.py
|
Python
|
apache-2.0
| 1,076
| 0.01487
|
#!/usr/bin/python
import sys
import os
if len(sys.argv) >= 5:
filename = sys.argv[1]
row_i = int(sys.argv[2])-1
target_ls_filename = sys.argv[3]
output_filename = sys.argv[4]
else:
print("usage: python selectrow.py filename row_i target_ls_filename")
print("or ./selectrow.py filename row_i target_ls_filename")
sys.exit(1)
################################################################################
file = open(filename,'r')
dt = {}
for line in file:
ls=line.strip().split('\t')
if not dt.has_key(ls[row_i]):
dt[ ls[row_i] ] = []
dt[ ls[row_i] ].append( line.strip() )
file.close()
################################################################################
output = open(output_filename,'w')
target_ls_file = open(target_ls_filename, 'r')
for line in target_ls_file:
id = line.strip()
if not dt.has_key(id):
print id
continue
if len(dt[id])>1:
print id + '\t' + str(len
|
(dt[id]))
for item in dt[id]:
output.write( item + '\n')
output.close()
target_ls_fil
|
e.close()
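# Hedged alternative (illustrative): the same key -> rows grouping as the loop above,
# written with collections.defaultdict; the original script is Python 2 (has_key,
# print statement), this helper is valid in both Python 2 and 3.
from collections import defaultdict

def group_rows(path, column_index):
    groups = defaultdict(list)
    handle = open(path, 'r')
    for row in handle:
        fields = row.strip().split('\t')
        groups[fields[column_index]].append(row.strip())
    handle.close()
    return groups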
|
delete/estofadora
|
estofadora/login/tests.py
|
Python
|
mit
| 3,269
| 0.001835
|
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from .forms import LoginForm
class LoginViewTest(TestCase):
def setUp(self):
self.client = Client()
self.response = self.client.get(reverse('login:login'))
def tearDown(self):
self.client.logout()
def test_get(self):
self.assertEqual(self.response.status_code, 200)
def test_template(self):
self.assertTemplateUsed(self.response, 'login.html')
def test_html(self):
        'HTML must contain 3 inputs (user, pass and csrf token) and a submit'
self.assertContains(self.response, '<input', 3)
self.assertContains(self.response, 'submit')
class LoginPostTest(TestCase):
def setUp(self):
user = User.objects.create_user(
'admin', 'admin@admin.com', '123'
)
self.client = Client()
def tearDown(self):
self.client.logout()
User.objects.all().delete()
def test_already_logged(self):
        'If already logged in, there will be a redirect, so it must return code 302'
self.response = self.client.post(
reverse('login:login'), self.make_validated_data()
)
self.response = self.client.get(reverse('login:login'))
self.assertEqual(self.response.status_code, 302)
def test_valid_login(self):
        'With a valid login, there will be a redirect, so it must return code 302'
self.response = self.client.post(
reverse('login:login'), self.make_validated_data()
)
self.assertEqual(self.response.status_code, 302)
def test_invalid_logi
|
n(self):
        'With an invalid login, there will be no redirect, so it must return code 200'
self.response = self.client.post(
|
reverse('login:login'), self.make_validated_data(password='1')
)
self.assertEqual(self.response.status_code, 200)
def make_validated_data(self, **kwargs):
data = {
'username': 'admin',
'password': '123'
}
data.update(kwargs)
return data
#TODO - FIX THESE TESTS.
#I DON'T KNOW WHY IT IS NOT RETURNING ERRORS
#WHEN USERNAME OR PASSWORD IS EMPTY.
class LoginFormTest(TestCase):
def setUp(self):
user = User.objects.create_user('admin', 'admin@admin.com', '123')
def test_if_has_fields(self):
form = LoginForm()
existing_fields = list(form.fields.keys())
expected_field = ['username', 'password']
self.assertEqual(existing_fields, expected_field)
# def test_username_is_not_optional(self):
# form = self.make_validated_form(username='')
# self.assertTrue(form.errors)
# def test_password_is_not_optional(self):
# form = self.make_validated_form(password='')
# self.assertTrue(form.errors)
def test_form(self):
form = self.make_validated_form()
self.assertFalse(form.errors)
def make_validated_form(self, **kwargs):
data = {
'username': 'admin',
'password': '123',
}
data.update(kwargs)
form = LoginForm(data)
form.is_valid()
return form
|
SUSE/azure-sdk-for-python
|
azure-mgmt-storage/azure/mgmt/storage/v2015_06_15/models/usage.py
|
Python
|
mit
| 1,829
| 0
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Usage(Model):
"""Describes Storage Resource Usage.
:param unit: The unit of measurement. Possible values include: 'Count',
'Bytes', 'Seconds', 'Percent', 'CountsPerSecond', 'BytesPerSecond'
:type unit: str or :class:`UsageUnit
<azure.mgmt.storage.v2015_06_15.models.UsageUnit>`
:param current_value: The current count of the allocated resour
|
ces in the
subscription.
:type current_value: int
:param limit: The maximum count of the resources that can be allocated in
the subscription.
:type limit: int
|
:param name: The name of the type of usage.
:type name: :class:`UsageName
<azure.mgmt.storage.v2015_06_15.models.UsageName>`
"""
_validation = {
'unit': {'required': True},
'current_value': {'required': True},
'limit': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'unit': {'key': 'unit', 'type': 'UsageUnit'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'int'},
'name': {'key': 'name', 'type': 'UsageName'},
}
def __init__(self, unit, current_value, limit, name):
self.unit = unit
self.current_value = current_value
self.limit = limit
self.name = name
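# Hedged construction sketch (illustrative numbers; `usage_name` is assumed to be a
# UsageName instance supplied by the caller):
def _example_usage(usage_name):
    return Usage(unit='Count', current_value=5, limit=250, name=usage_name)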
|
SlashRoot/WHAT
|
what_apps/utility/admin.py
|
Python
|
mit
| 665
| 0.006015
|
""" Django-admin autoregister -- automat
|
ic model registration
## sample admin.py ##
from yourproject.autoregister import autoregister
# register all models defined on each app
autoregister('app1', 'app2', 'app3', ...)
"""
from django.db.models import get_models, get_app
from django.contrib import admin
from django.contrib.admin.sites import AlreadyRegistered
def autoregister(*app_list):
for app_name in app_list:
app_models = get_app(app_name)
for model in get_models(app_models):
try:
|
admin.site.register(model)
except AlreadyRegistered:
pass
autoregister('utility')
|
arsfeld/conduit
|
test/python-tests/TestCoreTempFile.py
|
Python
|
gpl-2.0
| 4,008
| 0.010729
|
from common import *
import conduit.datatypes.File as File
import conduit.utils as Utils
import os
import tempfile
import datetime
import random
import stat
tmpdir = tempfile.mkdtemp()
ok("Created temp
|
dir %s" % tmpdir, True)
contents = Utils.rand
|
om_string()
name = Utils.random_string()+".foo"
tmpFile = File.TempFile(contents)
tmpFile.force_new_filename(name)
ok("Set filename to %s" % name, tmpFile._newFilename == name)
newPath = os.path.join(tmpdir, name)
tmpFile.transfer(tmpdir)
ok("Transferred -> %s" % newPath, os.path.isfile(newPath))
f = File.File(newPath)
ok("File contents = %s" % contents, f.get_contents_as_text() == contents)
mtime = f.get_mtime()
f = File.File(newPath)
ok("File name ok", f.get_filename() == name)
#make some 'real' files to play with
testDir = os.path.join(os.environ['TEST_DIRECTORY'],"TempFile")
if not os.path.exists(testDir):
os.mkdir(testDir)
testFiles = [
('/usr/bin/env','env',True,True),
('http://files.conduit-project.org/screenshot.png','screenshot.png',True,False)
]
for i in range(0,5):
j = Utils.random_string()
testFiles.append( (os.path.join(testDir,j),j,False,True) )
for path,i,readOnly,local in testFiles:
#1) create files
if not readOnly:
j = open(path,'w')
j.write(i)
j.close()
group = Utils.random_string()
f = File.File(path,group=group)
f.set_UID(Utils.random_string())
uid = f.get_UID()
size = f.get_size()
mt = f.get_mimetype()
#normal file operations on files, both r/o and writable
ok("not tempfile (%s)" % i, not f._is_tempfile())
ok("not tempfile uid ok", f.get_UID() == uid)
ok("not tempfile filename ok", f.get_filename() == i)
ok("not tempfile group ok", f.group == group)
nn = i+"-renamed"
f.force_new_filename(nn)
ok("not tempfile renamed ok", f.get_filename() == nn)
f.set_mtime(mtime)
ok("not tempfile set_mtime ok", f.get_mtime() == mtime)
#repeat the ops once we make the file a tempfile
if local:
tmppath = f.to_tempfile()
else:
tmppath = f.get_local_uri()
ok("tempfile (%s)" % tmppath, f.exists() and f._is_tempfile() and not f.is_directory())
ok("tempfile uid ok", f.get_UID() == uid)
ok("tempfile filename ok", f.get_filename() == nn)
ok("tempfile group ok", f.group == group)
ok("tempfile path is local", f.get_local_uri() == tmppath)
#check the transfer was ok
size2 = f.get_size()
ok("tempfile size is same", size == size2)
mt2 = f.get_mimetype()
ok("tempfile mimetype is same", mt == mt2)
#check that subsequent renames/mtimes are always deferred
#when the file is a tempfile
nn = i+"-renamed-again"
f.force_new_filename(nn)
ok("tempfile filename ok again", f.get_filename() == nn)
mtime2 = datetime.datetime.now()
f.set_mtime(mtime2)
ok("tempfile set_mtime ok again", f.get_mtime() == mtime2)
#check we can create a second tempfile with the same props
#and delete it, leaving the first tempfile behind
tmppath2 = f.to_tempfile()
ok("second tempfile (%s)" % tmppath2, tmppath2 != tmppath)
ok("second tempfile name == first tempfile name", f.get_filename() == nn)
f.delete()
ok("second tempfile deleted", not f.exists())
#get the first tempfile again, rename to original and copy to the original folder
f = File.File(tmppath)
ok("again tempfile (%s)" % tmppath, f.exists() and f._is_tempfile() and not f.is_directory())
f.force_new_filename(i)
ok("again tempfile filename ok", f.get_filename() == i)
ok("again tempfile path is local", f.get_local_uri() == tmppath)
f.transfer(testDir)
ok("again not tempfile filename ok", f.get_filename() == i)
if not readOnly:
#only makes sense to perform on files that were originally created in 1)
ok("again not tempfile path matches original", f.get_local_uri() == path)
ok("again not tempfile mtime ok", f.get_mtime() == mtime)
finished()
|
tboyce1/home-assistant
|
tests/components/hyperion/test_config_flow.py
|
Python
|
apache-2.0
| 26,352
| 0.002277
|
"""Tests for the Hyperion config flow."""
import logging
from typing import Any, Dict, Optional
from hyperion import const
from homeassistant import data_entry_flow
from homeassistant.components.hyperion.const import (
CONF_AUTH_ID,
CONF_CREATE_TOKEN,
CONF_PRIORITY,
DOMAIN,
)
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.config_entries import (
SOURCE_IMPORT,
SOURCE_REAUTH,
SOURCE_SSDP,
SOURCE_USER,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_PORT,
CONF_TOKEN,
SERVICE_TURN_ON,
)
from homeassistant.helpers.typing import HomeAssistantType
from . import (
TEST_AUTH_REQUIRED_RESP,
TEST_CONFIG_ENTRY_ID,
TEST_ENTITY_ID_1,
TEST_HOST,
TEST_INSTANCE,
TEST_PORT,
TEST_PORT_UI,
TEST_SYSINFO_ID,
TEST_TITLE,
TEST_TOKEN,
add_test_config_entry,
create_mock_client,
)
from tests.async_mock import AsyncMock, patch # type: ignore[attr-defined]
from tests.common import MockConfigEntry
_LOGGER = logging.getLogger(__name__)
TEST_IP_ADDRESS = "192.168.0.1"
TEST_HOST_PORT: Dict[str, Any] = {
CONF_HOST: TEST_HOST,
CONF_PORT: TEST_PORT,
}
TEST_AUTH_ID = "ABCDE"
TEST_REQUEST_TOKEN_SUCCESS = {
"command": "authorize-requestToken",
"success": True,
"info": {"comment": const.DEFAULT_ORIGIN, "id": TEST_AUTH_ID, "token": TEST_TOKEN},
}
TEST_REQUEST_TOKEN_FAIL = {
"command": "authorize-requestToken",
"success": False,
"error": "Token request timeout or denied",
}
TEST_SSDP_SERVICE_INFO = {
"ssdp_location": f"http://{TEST_HOST}:{TEST_PORT_UI}/description.xml",
"ssdp_st": "upnp:rootdevice",
"deviceType": "urn:schemas-upnp-org:device:Basic:1",
"friendlyName": f"Hyperion ({TEST_HOST})",
"manufacturer": "Hyperion Open Source Ambient Lighting",
"manufacturerURL": "https://www.hyperion-project.org",
"modelDescription": "Hyperion Open Source Ambient Light",
"modelName": "Hyperion",
"modelNumber": "2.0.0-alpha.8",
"modelURL": "https://www.hyperion-project.org",
"serialNumber": f"{TEST_SYSINFO_ID}",
"UDN": f"uuid:{TEST_SYSINFO_ID}",
"ports": {
"jsonServer": f"{TEST_PORT}",
"sslServer": "8092",
"protoBuffer": "19445",
"flatBuffer": "19400",
},
"presentationURL": "index.html",
"iconList": {
"icon": {
"mimetype": "image/png",
"height": "100",
"width": "100",
"depth": "32",
"url": "img/hyperion/ssdp_icon.png",
}
},
"ssdp_usn": f"uuid:{TEST_SYSINFO_ID}",
"ssdp_ext": "",
"ssdp_server": "Raspbian GNU/Linux 10 (buster)/10 UPnP/1.0 Hyperion/2.0.0-alpha.8",
}
async def _create_mock_entry(hass: HomeAssistantType) -> MockConfigEntry:
"""Add a test Hyperion entity to hass."""
entry: MockConfigEntry = MockConfigEntry( # type: ignore[no-untyped-call]
entry_id=TEST_CONFIG_ENTRY_ID,
domain=DOMAIN,
unique_id=TEST_SYSINFO_ID,
title=TEST_TITLE,
data={
"host": TEST_HOST,
"port": TEST_PORT,
"instance": TEST_INSTANCE,
},
)
entry.add_to_hass(hass) # type: ignore[no-untyped-call]
# Setup
client = create_mock_client()
with patch(
"homeassistant.components.hyperion.client.HyperionClient", return_value=client
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
async def _init_flow(
hass: HomeAssistantType,
source: str = SOURCE_USER,
data: Optional[Dict[str, Any]] = None,
) -> Any:
"""Initialize a flow."""
data = data or {}
return await hass.config_entries.flow.async_init(
DOMAIN, context={"source": source}, data=data
)
async def _configure_flow(
hass: HomeAssistantType, result: Dict, user_input: Optional[Dict[str, Any]] = None
) -> Any:
"""Provide input to a flow."""
user_input = user_input or {}
with patch(
"homeassistant.components.hyperion.async_setup", return_value=True
), patch(
"homeassistant.components.hyperion.async_setup_entry",
return_value=True,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=user_input
)
await hass.async_block_till_done()
return result
async def test_user_if_no_configuration(hass: HomeAssistantType) -> None:
"""Check flow behavior when no configuration is present."""
result = await _init_flow(hass)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["handler"] == DOMAIN
async def test_user_existing_id_abort(hass: HomeAssistantType) -> None:
"""Verify a duplicate ID results in an abort."""
result = await _init_flow(hass)
await _create_mock_entry(hass)
client = create_mock_client()
with patch(
"homeassistant.components.hyperion.client.HyperionClient", return_value=client
):
result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_user_client_errors(hass: HomeAssistantType) -> None:
"""Verify correct behaviour with client errors."""
result = await _init_flow(hass)
client = create_mock_client()
# Fail the connection.
client.async_client_connect = AsyncMock(return_value=False)
with patch(
"homeassistant.components.hyperion.client.HyperionClient", return_value=client
):
result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"]["
|
base"] == "cannot_connect"
# Fail the auth check call.
client.async_client_con
|
nect = AsyncMock(return_value=True)
client.async_is_auth_required = AsyncMock(return_value={"success": False})
with patch(
"homeassistant.components.hyperion.client.HyperionClient", return_value=client
):
result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "auth_required_error"
async def test_user_confirm_cannot_connect(hass: HomeAssistantType) -> None:
"""Test a failure to connect during confirmation."""
result = await _init_flow(hass)
good_client = create_mock_client()
bad_client = create_mock_client()
bad_client.async_client_connect = AsyncMock(return_value=False)
    # Confirmation async_client_connect fails.
with patch(
"homeassistant.components.hyperion.client.HyperionClient",
side_effect=[good_client, bad_client],
):
result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
async def test_user_confirm_id_error(hass: HomeAssistantType) -> None:
"""Test a failure fetching the server id during confirmation."""
result = await _init_flow(hass)
client = create_mock_client()
client.async_sysinfo_id = AsyncMock(return_value=None)
    # Confirmation async_sysinfo_id fails.
with patch(
"homeassistant.components.hyperion.client.HyperionClient", return_value=client
):
result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "no_id"
async def test_user_noauth_flow_success(hass: HomeAssistantType) -> None:
"""Check a full flow without auth."""
result = await _init_flow(hass)
client = create_mock_client()
with patch(
"homeassistant.components.hyperion.client.HyperionClient", return_value=client
):
result = await _configure_flow(hass, result, user_input=TEST_HOST_PORT)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["handl
|
SymbiFlow/symbiflow-arch-defs
|
ice40/utils/ice40_list_layout_in_icebox.py
|
Python
|
isc
| 972
| 0
|
#!/usr/bin/env python3
# Python libs
import os.path
import sys
import icebox
# FIXME: Move this into icebox
parts = [
# LP Series (Low Power)
"lp384",
"lp1k",
"lp8k",
# Unsupported: "lp640", "lp4k" (alias for lp8k),
|
# LM Series (Low Power, Embedded IP)
# Unsupported: "lm1k", "lm2k",
"lm4k",
# HX Series (High Performance)
"hx1k",
"
|
hx8k",
# Unsupported: "hx4k" (alias for hx8k)
# iCE40 UltraLite
# Unsupported: "ul640", "ul1k",
# iCE40 Ultra
# Unsupported: "ice5lp1k", "ice5lp2k", "ice5lp4k",
# iCE40 UltraPLus
# Unsupported: "up3k",
"up5k",
]
def versions(part):
return [p for p in parts if p.endswith(part)]
if __name__ == "__main__":
for name, pins in icebox.pinloc_db.items():
part, package = name.split('-')
if ':' in package:
continue
for v in versions(part):
device = "{}.{}".format(v, package)
print(device)
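# Hedged self-check (added for illustration, not called by the script): versions()
# simply filters the part list above by suffix, so "8k" expands to both LP and HX variants.
def _versions_demo():
    assert versions("8k") == ["lp8k", "hx8k"]
    assert versions("4k") == ["lm4k"]
    return versions("1k")  # -> ["lp1k", "hx1k"]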
|
mandyRae/pythonic-charlieplex
|
charlie.py
|
Python
|
mit
| 5,714
| 0.029051
|
'''
charlie.py
---class for controlling charlieplexed SparkFun 8x7 LED Array with the Raspberry Pi
Relies upon RPi.GPIO written by Ben Croston
The MIT License (MIT)
Copyright (c) 2016 Amanda Cole
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and asso
|
ciated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, a
|
nd/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import RPi.GPIO as GPIO, time, random
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
class Charlie:
'''
Class for control of the charlieplexed SparkFun 8x7 LED Array.
'''
def __init__(self, pins):
'''
pins: type 'list', list of ints for array pins a-h, in order [a,b,c,d,e,f,g,h]
'''
if len(pins) != 8:
print("You must specify eight, and only eight, pins.")
raise ValueError
for pin in pins:
if type(pin) != int:
print("Pins must be of type int.")
raise TypeError
GPIO.setup(pin, GPIO.OUT, initial = False)
a = pins[0]
b = pins[1]
c = pins[2]
d = pins[3]
e = pins[4]
f = pins[5]
g = pins[6]
h = pins[7]
self.array = [[[h,g],[g,h],[f,h],[e,h],[d,h],[c,h],[b,h],[a,h]], \
[[h,f],[g,f],[f,g],[e,g],[d,g],[c,g],[b,g],[a,g]], \
[[h,e],[g,e],[f,e],[e,f],[d,f],[c,f],[b,f],[a,f]], \
[[h,d],[g,d],[f,d],[e,d],[d,e],[c,e],[b,e],[a,e]], \
[[h,c],[g,c],[f,c],[e,c],[d,c],[c,d],[b,d],[a,d]], \
[[h,b],[g,b],[f,b],[e,b],[d,b],[c,b],[b,c],[a,c]], \
[[h,a],[g,a],[f,a],[e,a],[d,a],[c,a],[b,a],[a,b]]]
self.ALL_PINS = [a,b,c,d,e,f,g,h]
def switchOrigin(self):
'''
Places origin [0,0] in the diagonally opposite corner of where its current position.
'''
switched_array = self.array
switched_array.reverse()
for i in switched_array:
i.reverse()
self.array = switched_array
def clearDisplay(self):
'''
Clears display.
'''
GPIO.setup(self.ALL_PINS, GPIO.IN)
def displayPoint(self, coord):
'''
coord: type 'list', coordinates of single pixel to be lit
Lights a single pixel.
'''
self.clearDisplay()
GPIO.setup(self.array[coord[0]][coord[1]][0], GPIO.OUT, initial = 1)
GPIO.setup(self.array[coord[0]][coord[1]][1], GPIO.OUT, initial = 0)
def test(self):
'''
Displays all pixels in array, one at a time, starting with [0,0] and ending with [6,7].
'''
x = 0
y = 0
while y < 8:
self.displayPoint([x,y])
time.sleep(0.1)
x += 1
if x >= 7:
x = 0
y += 1
self.clearDisplay()
def display(self, pixels, duration):
'''
pixels: type 'list', list of pixels to be lit each in coordinate form [x,y]
duration: type 'int', duration to display coordinates
Lights specified pixels in array
'''
positives = []
for coord in pixels:
if self.array[coord[0]][coord[1]][0] not in positives:
positives.append([self.array[coord[0]][coord[1]][0],[]])
for i in positives: #[[a,[]],[b,[]],[h,[]]]
for coord in pixels:
if self.array[coord[0]][coord[1]][0] == i[0]:
if self.array[coord[0]][coord[1]][1] not in i[1]:
i[1].append(self.array[coord[0]][coord[1]][1])
t = 0
pause = 0.02/len(positives)
while t < duration:
for i in range(0, len(positives)):
self.clearDisplay()
GPIO.setup(positives[i][0], GPIO.OUT, initial = True)
GPIO.setup(positives[i][1], GPIO.OUT, initial = False)
time.sleep(pause)
t += pause
self.clearDisplay()
def screensaver(self, duration, fill = .5):
'''
duration: type 'int', duration to keep screensaver on
fill: type 'float', proportion of array to fill with pixels at any given time
Randomly displays pixels on array.
'''
if fill > 1 or fill < 0:
print("fill must be of type 'float' between 0 and 1...using default value instead.")
fill = 0.5
t = 0
while t < duration:
coords = []
while len(coords) < fill*56:
coord = [random.randint(0,6), random.randint(0,7)]
if coord not in coords:
coords.append(coord)
self.display(coords, 0.15)
t += 0.1
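# Hedged usage sketch (illustrative BCM pin numbers for pins a-h; check your own wiring
# before running, this needs the actual LED array attached to a Raspberry Pi):
# array = Charlie([2, 3, 4, 17, 27, 22, 10, 9])
# array.test()                                  # walk every pixel once
# array.display([[0, 0], [3, 4], [6, 7]], 2)    # light three pixels for ~2 seconds
# array.clearDisplay()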
|
smcv/vectis
|
vectis/commands/bootstrap.py
|
Python
|
gpl-2.0
| 4,115
| 0
|
# Copyright © 2016-2017 Simon McVittie
# SPDX-License-Identifier: GPL-2.0+
# (see vectis/__init__.py)
import os
import pwd
import shutil
import subprocess
from tempfile import TemporaryDirectory
from debian.debian_support import (
Version,
)
from vectis.commands.new import vmdebootstrap_argv
from vectis.error import ArgumentError
from vectis.worker import (
VirtWorker,
)
def run(args):
if args.suite is None:
if args.worker_suite is not None:
args.suite = args.worker_suite
else:
raise ArgumentError('--suite must be specified')
architecture = args.architecture
keep = args._keep
kernel_package = args.get_kernel_package(architecture)
mirrors = args.get_mirrors()
out = args.write_qemu_image
qemu_image_size = args.qemu_image_size
storage = args.storage
vendor = args.vendor
suite = args.get_suite(vendor, args.suite)
uri = args._uri
vmdebootstrap_options = args.vmdebootstrap_options
default_dir = os.path.join(
storage, architecture, str(vendor), str(suite))
if uri is None:
uri = mirrors.lookup_suite(suite)
try:
version = subprocess.check_output(
['dpkg-query', '-W', '-f${Version}', 'vmdebootstrap'],
universal_newlines=True).rstrip('\n')
    except subprocess.CalledProcessError:
# non-dpkg host, guess a recent version
version = Version('1.7')
debootstrap_version = Version('1.0.89')
else:
version = Version(version)
debootstrap_version = subprocess.check_output(
['dpkg-query', '-W', '-f${Version}', 'debootstrap'],
universal_newlines=True).rstrip('\n')
debootstrap_version = Version(debootstrap_version)
with TemporaryDirectory(prefix='vectis-bootstrap-') as scratch:
argv = [
'sudo',
os.path.join(
os.path.dirname(__file__), os.pardir,
'vectis-command-wrapper'),
'--',
]
vmdb_argv, debootstrap_argv, default_name = vmdebootstrap_argv(
version,
architecture=architecture,
components=args.components,
debootstrap_version=debootstrap_version,
kernel_package=kernel_package,
qemu_image_s
|
ize=qemu_image_size,
suite=suite,
uri=uri,
merged_usr=args._merged_usr,
)
argv.extend(vmdb_argv)
argv.append('--debootstrapopts=' + ' '.join(debootstrap_argv))
argv.extend(vmdebootstrap_options)
argv.append(
'--customize={}'.format(os.path.join(
os.path.dirname(__file__), os.pardir, 'setup-testbed')))
argv.append('--owner={}'.format(pwd.getpwuid(os.getuid())[0]))
argv.append('--image={}/ou
|
tput.raw'.format(scratch))
subprocess.check_call(argv)
subprocess.check_call([
'qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', '-c', '-p',
'{}/output.raw'.format(scratch),
'{}/output.qcow2'.format(scratch),
])
if out is None:
out = os.path.join(default_dir, default_name)
os.makedirs(os.path.dirname(out) or os.curdir, exist_ok=True)
shutil.move('{}/output.qcow2'.format(scratch), out + '.new')
try:
with VirtWorker(
['qemu', '{}.new'.format(out)],
storage=storage,
suite=suite,
mirrors=mirrors) as worker:
worker.check_call([
'env',
'DEBIAN_FRONTEND=noninteractive',
'apt-get',
'-y',
'--no-install-recommends',
'-t', suite.apt_suite,
'install',
'python3',
'sbuild',
'schroot',
])
except Exception:
if not keep:
os.remove(out + '.new')
raise
else:
os.rename(out + '.new', out)
|
recognai/spaCy
|
spacy/lang/ur/lemmatizer.py
|
Python
|
mit
| 921,455
| 0.000003
|
# coding: utf8
from __future__ import unicode_literals
# Adding a lemmatizer lookup table
# Documentation: https://spacy.io/docs/usage/adding-languages#lemmatizer
# Entries should be added in the following format:
LOOKUP = {
"آ": "آنا",
"آْباد": "آْباد",
"آثار": "آثار",
"آثارِ": "آثارِ",
"آثارالصنادید": "آثارالصنادید",
"آثارو": "آثار",
"آثاروں": "آثار",
"آڈیو": "آڈیو",
"آڈیوز": "آڈیوز",
"آغا": "آغا",
"آغاحشر": "آغاحشر",
"آغاز": "آغاز",
"آغازو": "آغاز",
"آغازوں": "آغاز",
"آغوش": "آغوش",
"آغوشو": "آغوش",
"آغوشوں": "آغوش",
"آغوشیں": "آغوش",
"آحباب": "آحباب",
"آحبابو": "آحباب",
"آحبابوں": "آحباب",
"آخَر": "آخَر",
"آخِر": "آخِر",
"آخر": "آخر",
"آخرالذکر": "آخرالذکر",
"آخرالزماں": "آخرالزماں",
"آخرالزمان": "آخرالزمان",
"آخردماغ": "آخردماغ",
"آخرکار": "آخرکار",
"آخرت": "آخرت",
"آخرتو": "آخرت",
"آخرتوں": "آخرت",
"آخرتیں": "آخرت",
"آٹے": "آٹا",
"آٹا": "آٹا",
"آٹو": "آٹا",
"آٹوں": "آٹا",
"آٹھ": "آٹھ",
"آٹھواں": "آٹھواں",
"آٹھووںں": "آٹھواں",
"آٹھویں": "آٹھواں",
"آشامی": "آشامی",
"آشامیاں": "آشامی",
"آشامیو": "آشامی",
"آشامیوں": "آشامی",
"آشانے": "آشانہ",
"آشانہ": "آشانہ",
"آشانو": "آشانہ",
"آشانوں": "آشانہ",
"آذاد": "آذاد",
"آذان": "آذان",
"آذانو": "آذان",
"آذانوں": "آذان",
"آذانیں": "آذان",
"آب": "آب",
"آبخورے": "آبخورہ",
"آبخورہ": "آبخورہ",
"آبخورو": "آبخورہ",
"آبخوروں": "آبخورہ",
"آبشار": "آبشار",
"آبشارو": "آبشار",
"آبشاروں": "آبشار",
"آبشاریں": "آبشار",
"آبا": "آبا",
"آبادی": "آبادی",
"آبادیاں": "آبادی",
"آبادیو": "آبادی",
"آبادیوں": "آبادی",
"آباؤ": "آبا",
"آباؤں": "آبا",
"آبائ": "آبائ",
"آبائو": "آبائ",
"آبائوں": "آبائ",
"آبائی": "آبائی",
"آبگینے": "آبگینہ",
"آبگینہ": "آبگینہ",
"آبگینو": "آبگینہ",
"آبگینوں": "آبگینہ",
"آبلے": "آبلہ",
"آبلہ": "آبلہ",
"آبلو": "آبلہ",
"آبلوں": "آبلہ",
"آبرو": "آبرو",
"آبروو": "آبرو",
"آبرووں": "آبرو",
"آبروؤ": "آبرو",
"آبروؤں": "آبرو",
"آبرویں": "آبرو",
"آبروئیں": "آبرو",
"آبی": "آبی",
"آداب": "آداب",
"آدم": "آدم",
"آدمو": "آدم",
"آدموں": "آدم",
"آدمی": "آدمی",
"آدمیاں": "آدمی",
"آدمیو": "آدمی",
"آدمیوں": "آدمی",
"آدرش": "آدرش",
"آدھمک": "آدھمکنا",
"آدھمکے": "آدھمکنا",
"آدھمکں": "آدھمکنا",
"آدھمکا": "آدھمکنا",
"آدھمکانے": "آدھمکنا",
"آدھمکانا": "آدھمکنا",
"آدھمکاتے": "آدھمکنا",
"آدھمکاتا": "آدھمکنا",
"آدھمکاتی": "آدھمکنا",
"آدھمکاتیں": "آدھمکنا",
"آدھمکاؤ": "آدھمکنا",
"آدھمکاؤں": "آدھمکنا",
"آدھمکائے": "آدھمکنا",
"آدھمکائی": "آدھمکنا",
"آدھمکائیے": "آدھمکنا",
"آدھمکائیں": "آدھمکنا",
"آدھمکایا": "آدھمکنا",
"آدھمکنے": "آدھمکنا",
"آدھمکنا": "آدھمکنا",
"آدھمکنی": "آدھمکنا",
"آدھمکتے": "آدھمکنا",
"آدھمکتا": "آدھمکنا",
"آدھمکتی": "آدھمکنا",
"آدھمکتیں": "آدھمکنا",
"آدھمکو": "آدھمکنا",
"آدھمکوں": "آدھمکنا",
"آدھمکی": "آدھمکنا",
"آدھمکیے": "آدھمکنا",
"آدھمکیں": "آدھمکنا",
"آفاقے": "آفاقہ",
"آفاقہ": "آفاقہ",
"آفاقو": "آفاقہ",
"آفاقوں": "آفاقہ",
"آفاقی": "آفاقی",
"آفاقیت": "آفاقیت",
"آفریں": "آفریں",
"آفریدے": "آفریدہ",
"آفریدہ": "آفریدہ",
"آفریدو": "آفریدہ",
"آفریدوں": "آفریدہ",
"آفرین": "آفرین",
"آفرینو": "آفرین",
"آفرینوں": "آفرین",
"آفت": "آفت",
"آفتاب": "آفتاب",
"آفتابو": "آفتاب",
"آفتابوں": "آفتاب",
"آفتو": "آفت",
"آفتوں": "آفت",
"آفتیں": "آفت",
"آفیسر": "آفیسر",
"آفیسرو": "آفیسر",
"آفیسروں": "آفیسر",
"آگرے": "آگرا",
"آگرا": "آگرا",
"آگرہ": "آگرہ",
"آگرو": "آگرا",
"آگروں": "آگرا",
"آگیا": "آگیا",
"آگیاؤں": "آگیا",
"آگیائیں": "آگیا",
"آہ": "آہ",
"آہٹ": "آہٹ",
"آہٹو": "آہٹ",
"آہٹوں": "آہٹ",
"آہٹیں": "آہٹ",
"آہو": "آہ",
"آہوں": "آہ",
"آہیں": "آہ",
"آل": "آل",
"آلُو": "آلُو",
"آلُوؤ": "آلُو",
"آلُوؤں": "آلُو",
"آلے": "آلا",
"آلا": "آلا",
"آلو": "آلا",
"آلوں": "آلا",
"آلودگی": "آلودگی",
"آلودگیاں": "آلودگی",
"آلودگیو": "آلودگی",
"آلودگیوں": "آلودگی",
"آلوؤ": "آلو",
"آلوؤں": "آلو",
"آلیں":
|
"آل",
"آم": "آم",
"آمد": "آمد",
"آمدنی": "آمدنی",
"آمدنیاں": "آمدنی",
"آمدنیو": "آمدنی",
"آمدنیوں": "آمدنی",
"آمدو": "آمد",
"آمدوں": "آمد",
"آمدیں": "آمد",
"آملے": "آملہ",
"آملہ": "آمل
|
ہ",
"آملو": "آملہ",
"آملوں": "آملہ",
"آمنا": "آمنا",
"آمر": "آمر",
"آمرو": "آمر",
"آمروں": "آمر",
"آمو": "آم",
"آموں": "آم",
"آنے": "آنہ",
"آنا": "آنا",
"آندھی": "آندھی",
"آندھیاں": "آندھی",
"آندھیو": "آندھی",
"آندھیوں": "آندھی",
"آنگن": "آنگن",
"آنگنو": "آنگن",
"آنگنوں": "آنگن",
"آنہ": "آنہ",
"آنکے": "آنکہ",
"آنکہ": "آنکہ",
"آنکو": "آنکہ",
"آنکوں": "آنکہ",
"آنکھ": "آنکھ",
"آنکھو": "آنکھ",
"آنکھوں": "آنکھ",
"آنکھیں": "آنکھ",
"آنسو": "آنسو",
"آنسوو": "آنسو",
"آنسووں": "آنسو",
"آنسوؤ": "آنسو",
"آنسوؤں": "آنسو",
"آنت": "آنت",
"آنتو": "آنت",
"آنتوں": "آنت",
"آنتیں": "آنت",
"آنو": "آنہ",
"آنوں": "آنہ",
"آنی": "آنا",
"آپ": "آپ",
"آپْکے": "میرا",
"آپْکا": "میرا",
"آپْکی": "میرا",
"آپَس": "آپَس",
"آپکے": "میرا",
"آپکا": "میرا",
"آپکی": "میرا",
"آپس": "آپَس",
"آقا": "آقا",
"آقاؤ": "آقا",
"آقاؤں": "آقا",
"آرے": "آرہ",
"آرٹسٹ": "آرٹسٹ",
"آرٹسٹو": "آرٹسٹ",
"آرٹسٹوں": "آرٹسٹ",
"آرائش": "آرائش",
"آرائشو": "آرائش",
"آرائشوں": "آرائش",
"آرائشیں": "آرائش",
"آرائی": "آرائی",
"آرائیاں": "آرائی",
"آرائیو": "آرائی",
"آرائیوں": "آرائی",
"آرہ": "آرہ",
"آرو": "آرہ",
"آروں": "آرہ",
"آرز": "آرز",
"آرزے": "آرزہ",
"آرزہ": "آرزہ",
"آرزو": "آرزہ",
"آرزوں": "آرزہ",
"آرزوؤ": "آرزو",
"آرزوؤں": "آرزو",
"آرزوئیں": "آرزو",
"آسامی": "آسامی",
"آسامیاں": "آسامی",
"آسامیو": "آسامی",
"آسامیوں": "آسامی",
"آسانی": "آسانی",
"آسانیا": "آسانیا",
"آسانیاں": "آسانی",
"آسانیو": "آسانی",
"آسانیوں": "آسانی",
"آسائش": "آسائش",
"آسائشو": "آسائش",
"آسائشوں": "آسائش",
"آسائشیں": "آسائش",
"آسمان": "آسمان",
"آسمانو": "آسمان",
"آسمانوں": "آسمان",
"آستانے": "آستانہ",
"آستانہ": "آستانہ",
"آستانو": "آستانہ",
"آستانوں": "آستانہ",
"آستن": "آستن",
"آستنو": "آستن",
"آستنوں": "آستن",
"آستین": "آستین",
"آستینو": "آستین",
"آستینوں": "آستین",
"آستینیں": "آستین",
"آتے": "آنا",
"آتا": "آنا",
"آتی": "آنا",
"آتیں": "آنا",
"آؤ": "آؤ",
"آؤں": "آنا",
"آوارگی": "آوارگی",
"آوارگیاں": "آوارگی",
"آوارگیو": "آوارگی",
"آوارگیوں": "آوارگی",
"آواز": "آواز",
"آوازے": "آوازہ",
"آوازہ": "آوازہ",
"آوازو": "آوازہ",
"آوازوں": "آوازہ",
"آوازیں": "آواز",
"آور": "آور",
"آورو": "آور",
"آوروں": "آور",
"آئے": "آنا",
"آئنے": "آئنہ",
"آئنہ": "آئنہ",
"آئنو": "آئنہ",
"آئنوں": "آئنہ",
"آئی": "آئی",
"آئیے": "آنا",
"آئیں": "آنا",
"آئین": "آئین",
"آئینے": "آئینہ",
"آئینہ": "آئینہ",
"آئینو": "آئینہ",
"آئینوں": "آئینہ",
"آئیو": "آئی",
"آئیوں": "آئی",
"آیا": "آیا",
"آیات": "آیات",
"آیاتو": "آیات",
"آیاتوں": "آیات",
"آیاؤ": "آیا",
"آیاؤں": "آیا",
"آیائیں": "آیا",
"آیت": "آیت",
"آیتو": "آیت",
"آیتوں": "آیت",
"آیتیں": "آیت",
"آزاد": "آزاد",
"آزادو": "آزاد",
"آزادوں": "آزاد",
"آزادی": "آزادی",
"آزادیاں": "آزادی",
"آزادیو": "آزادی",
"آزادیوں": "آزادی",
"آزما": "آزمانا",
"آزمانے": "آزمانا",
"آزمانا": "آزمانا",
"آزمانی": "آزمانا",
"آزماتے": "آزمانا",
"آزماتا": "آزمانا",
"آزماتی": "آزمانا",
"آزماتیں": "آزمانا",
"آزماؤ": "آزمانا",
"آزماؤں": "آزمانا",
"آزمائے": "آزمانا",
"آزمائش": "آزمائش",
"آزمائشو": "آزمائش",
"آزمائشوں": "آزمائش",
"آزمائشیں": "آزمائش",
"آزمائی": "آزمائی",
"آزمائیے": "آزمانا",
"آزمائیں": "آزمانا",
"آزمائیاں": "آزمائی",
"آزمائیو": "آزمائی",
"آزمائیوں": "آزمائی",
"آزمایا": "آزمانا",
"ثقافت": "ثقافت",
"ثقافتو": "ثقافت",
"ثقافتوں": "ثقافت",
"ثقافتیں": "ثقافت",
"ڈِبْیا": "ڈِبْیا",
"ڈِبْیاں": "ڈِبْیا",
"ڈِبْیو": "ڈِبْیا",
"ڈِبْیوں": "ڈِبْیا",
"ڈاک": "ڈاک",
"ڈاکُو": "ڈاکُو",
"ڈاکُوؤ": "ڈاکُو",
"ڈاکُوؤں": "ڈاکُو",
"ڈاکے": "ڈاکا",
"ڈاکخانے": "ڈاکخانہ",
"ڈاکخانہ": "ڈاکخانہ",
"ڈاکخانو": "ڈاکخانہ",
"ڈاکخانوں": "ڈاکخانہ",
"ڈاکٹر": "ڈاکٹر",
"ڈاکٹرو": "ڈاکٹر",
"ڈاکٹروں": "ڈاکٹر",
"ڈاکا": "ڈاکا",
"ڈاکہ": "ڈاکہ",
"ڈاکو": "ڈاکا",
"ڈاکوں": "ڈاکا",
"ڈاکوؤ": "ڈاکو",
"ڈاکوؤں": "ڈاکو",
"ڈال": "ڈالنا",
"ڈالے": "ڈالا",
"ڈالں": "ڈالنا",
"ڈالا": "ڈالا",
"ڈالنے": "ڈالنا",
"ڈالن
|
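# Illustrative sketch, not part of the original data: assuming the
# inflection-to-lemma mapping above is loaded into a Python dict named
# LEMMA_MAP (hypothetical name), lemmatizing a token is a plain lookup that
# falls back to the surface form for unknown words:
#     lemma = LEMMA_MAP.get(token, token)
# e.g. LEMMA_MAP.get("آنکھوں") == "آنکھ" according to the entries above.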
111pontes/ydk-py
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_patch_panel_cfg.py
|
Python
|
apache-2.0
| 2,513
| 0.035814
|
""" Cisco_IOS_XR_patch_panel_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR patch\-panel package configuration.
This module contains definitions
for the following management objects\:
patch\-panel\: patch\-panel service submode
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class PatchPanel(object):
"""
patch\-panel service submode
.. attribute:: enable
Enable patch\-panel service
**type**\: :py:class:`Empty<ydk.types.Empty>`
**mandatory**\: True
.. attribute:: ipv4
IP address for patch\-panel
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: password
Password name to be used for Authentication with Patch\-Panel
**type**\: str
**pattern:** (!.+)\|([^!].+)
.. attribute:: user_name
User name to be used for Authentication with Patch\-Panel
**type**\: str
.. attribute:: _is_presence
Is present if this instance represents presence container else not
**type**\: bool
This class is a :ref:`presence class<presence-class>`
"""
_prefix = 'patch-panel-cfg'
_revision = '2015-11-09'
def __init__(self):
self._is_presence = True
self.enable = None
self.ipv4 = None
self.password = None
self.user_name = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-patch-panel-cfg:patch-panel'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return True
def _has_data(self):
if self._is_presence:
return True
if self.enable is not None:
return True
if self.ipv4 is not None:
return True
if self.password is not None:
return True
if self.user_name is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_patch_panel_cfg as meta
return meta._meta_table['PatchPanel']['meta_info']
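# Minimal usage sketch, not part of the generated bindings: populate the
# presence container defined above and check it locally. All values are
# placeholders; pushing the object to a device would additionally need a
# ydk service provider, which is out of scope here.
panel = PatchPanel()
panel.enable = Empty()         # mandatory leaf of type Empty
panel.ipv4 = '192.0.2.10'      # placeholder patch-panel address
panel.user_name = 'admin'      # placeholder credentials
panel.password = 'secret'
assert panel.is_config() and panel._has_data()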
|
maferelo/saleor
|
tests/api/test_core_reordering.py
|
Python
|
bsd-3-clause
| 8,739
| 0.001831
|
import pytest
from saleor.graphql.core.utils.reordering import perform_reordering
from saleor.product import models
SortedModel = models.AttributeValue
def _sorted_by_order(items):
return sorted(items, key=lambda o: o[1])
def _get_sorted_map():
return list(
SortedModel.objects.values_list("pk", "sort_order").order_by("sort_order")
)
@pytest.fixture
def dummy_attribute():
return models.Attribute.objects.create(name="Dummy")
@pytest.fixture
def sorted_entries_seq(dummy_attribute):
attribute = dummy_attribute
values = SortedModel.objects.bulk_create(
[
SortedModel(
attribute=attribute, slug=f"value-{i}", name=f"Value-{i}", sort_order=i
)
for i in range(6)
]
)
return list(values)
@pytest.fixture
def sorted_entries_gaps(dummy_attribute):
attribute = dummy_attribute
values = SortedModel.objects.bulk_create(
[
SortedModel(
attribute=attribute, slug=f"value-{i}", name=f"Value-{i}", sort_order=i
)
for i in range(0, 12, 2)
]
)
return list(values)
def test_reordering_sequential(sorted_entries_seq):
"""
Ensures the reordering logic works as expected. This test simply provides
sequential sort order values and tries to reorder them.
"""
qs = SortedModel.objects
nodes = sorted_entries_seq
operations = {nodes[5].pk: -1, nodes[2].pk: +3}
expected = _sorted_by_order(
[
(nodes[0].pk, 0),
(nodes[1].pk, 1),
(nodes[2].pk, 2 + 3),
(nodes[3].pk, 3 - 1),
(nodes[4].pk, 4 + 1 - 1),
(nodes[5].pk, 5 - 1 - 1),
]
)
perform_reordering(qs, operations)
actual = _get_sorted_map()
assert actual == expected
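# Illustrative note, not part of the test suite: the operations mapping given
# to perform_reordering() is {pk: relative_move}, so
#     {nodes[5].pk: -1, nodes[2].pk: +3}
# asks to move node 5 one position toward the start and node 2 three positions
# toward the end; the items in between are shifted to make room, which is what
# the expected lists in these tests spell out.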
def test_reordering_non_sequential(sorted_entries_gaps):
"""
Ensures that reordering non-sequential sort order values is properly
handled. This case happens when an item gets deleted, creating gaps between values.
"""
qs = SortedModel.objects
nodes = sorted_entries_gaps
operations = {nodes[5].pk: -1, nodes[2].pk: +3}
expected = _sorted_by_order(
[
(nodes[0].pk, 0),
(nodes[1].pk, 2),
(nodes[2].pk, 4 + (3 * 2) - 1),
(nodes[3].pk, 6 - 1),
(nodes[4].pk, 8 + 1 - 1),
(nodes[5].pk, 10 - (1 * 2) - 1),
]
)
perform_reordering(qs, operations)
actual = _get_sorted_map()
assert actual == expected
@pytest.mark.parametrize(
"operation, expected_operations",
[((0, +5), (+5, -1, -1, -1, -1, -1)), ((5, -5), (+1, +1, +1, +1, +1, -5))],
)
def test_inserting_at_the_edges(sorted_entries_seq, operation, expected_operations):
"""
Ensures it is possible to move an item at the top and bottom of the list.
"""
qs = SortedModel.objects
nodes = sorted_entries_seq
target_node_pos, new_rel_sort_order = operation
operations = {nodes[target_node_pos].pk: new_rel_sort_order}
expected = _sorted_by_order(
[
(node.pk, node.sort_order + op)
for node, op in zip(nodes, expected_operations)
]
)
perform_reordering(qs, operations)
actual = _get_sorted_map()
assert actual == expected
def test_reordering_out_of_bound(sorted_entries_seq):
"""
Ensures it is not possible to manually create gaps or for users
to insert anywhere they want, e.g. -1000, which could make a mess
of the database.
"""
qs = SortedModel.objects
nodes = sorted_entries_seq
operations = {nodes[5].pk: -100, nodes[0].pk: +100}
expected = _sorted_by_order(
[
(nodes[0].pk, 0 + 5),
(nodes[1].pk, 1),
(nodes[2].pk, 2),
(nodes[3].pk, 3),
(nodes[4].pk, 4),
(nodes[5].pk, 5 - 5),
]
)
perform_reordering(qs, operations)
actual = _get_sorted_map()
assert actual == expected
def test_reordering_null_sort_orders(dummy_attribute):
"""
Ensures null sort orders values are getting properly ordered (by ID sorting).
"""
attribute = dummy_attribute
qs = SortedModel.objects
non_null_sorted_entries = list(
qs.bulk_create(
[
SortedModel(
pk=1, attribute=attribute, slug="1", name="1", sort_order=1
),
SortedModel(
pk=2, attribute=attribute, slug="2", name="2", sort_order=0
),
]
)
)
null_sorted_entries = list(
qs.bulk_create(
[
SortedModel(
pk=5, attribute=attribute, slug="5", name="5", sort_order=None
),
SortedModel(
pk=4, attribute=attribute, slug="4", name="4", sort_order=None
),
SortedModel(
pk=3, attribute=attribute, slug="3", name="3", sort_order=None
),
]
)
)
operations = {null_sorted_entries[0].pk: -2}
expected = [
(non_null_sorted_entries[1].pk, 0),
(non_null_sorted_entries[0].pk, 1),
(null_sorted_entries[0].pk, 2),
(null_sorted_entries[2].pk, 3),
(null_sorted_entries[1].pk, 4),
]
perform_reordering(qs, operations)
actual = _get_sorted_map()
assert actual == expected
def test_reordering_nothing(sorted_entries_seq, assert_num_queries):
"""
Ensures that operations which do nothing are skipped. Thus only one query should
have been made: fetching the nodes.
"""
qs = SortedModel.objects
pk = sorted_entries_seq[0].pk
operations = {pk: 0}
with assert_num_queries(1) as ctx:
perform_reordering(qs, operations)
assert ctx[0]["sql"].startswith("SELECT "), "Should only have done a SELECT"
def test_giving_no_operation_does_no_query(sorted_entries_seq, assert_num_queries):
"""Ensures giving no operations runs no queries at all."""
qs = SortedModel.objects
with assert_num_queries(0):
perform_reordering(qs, {})
def test_reordering_concurrently(dummy_attribute, assert_num_queries):
"""
Ensures users cannot reorder concurrently; they need to wait for the other one
to finish.
This must be the first thing done before doing anything. For that, we ensure
the first SQL query is acquiring the lock.
"""
qs = SortedModel.objects
attribute = dummy_attribute
entries = list(
qs.bulk_create(
[
SortedModel(
pk=1, attribute=attribute, slug="1", name="1", sort_order=0
),
SortedModel(
pk=2, attribute=attribute, slug="2", name="2", sort_order=1
),
]
)
)
operations = {entries[0].pk: +1}
with assert_num_queries(2) as ctx:
perform_reordering(qs, operations)
assert ctx[0]["sql"] == (
'SELECT "product_attributevalue"."id", "product_attributevalue"."sort_order" '
'FROM "product_attributevalue" '
"ORDER BY "
'"product_attributevalue"."sort_order" ASC NULLS LAST, '
'"product_attributevalue"."id" ASC FOR UPDATE'
)
assert ctx[1]["sql"] == (
'UPDATE "product_attributevalue" '
'SET "sort_order" = (CASE WHEN ("product_attributevalue"."id" = 1) '
'THEN 1 WHEN ("product_attributevalue"."id" = 2) '
"THEN 0 ELSE NULL END)::integer "
'WHERE "product_attributevalue"."id" IN (1, 2)'
)
def test_reordering_deleted_node_from_concurrent(dummy_attribute, assert_num_queries):
"""
Ensures if a node was deleted before locking, it just skip it instead of
raising an error.
"""
qs = SortedModel.objects
attribute = dummy_attribute
entries = list(
qs.bulk_create(
[
SortedModel(
pk=1, attribute=attribute, slug="1", name="1", sort_order=0
),
SortedModel(
|
unreal666/outwiker
|
src/test/plugins/statistics/test_loading.py
|
Python
|
gpl-3.0
| 587
| 0
|
# -*- coding: utf-8 -*-
import unittest
from test.basetestcases import PluginLoadingMixin
class StatisticsLoadingTest (PluginLoadingMixin, unittest.TestCase):
def getPluginDir(self):
"""
Must return the path to the folder containing the plugin under test
"""
return "../plugins/sta
|
tistics"
def getPluginName(self):
"""
Must return the name of the plugin by which it can be
found in PluginsLoader
"""
return "Statistics"
|
BrainIntensive/OnlineBrainIntensive
|
resources/HCP/ciftify/ciftify/config.py
|
Python
|
mit
| 8,757
| 0.006052
|
#!/usr/bin/env python
"""
These functions search the environment for software dependencies and configuration.
"""
from __future__ import unicode_literals
import os
import subprocess
import logging
import pkg_resources
import ciftify.utilities as util
def find_workbench():
"""
Returns path of the workbench bin/ folder, or None if unavailable.
"""
try:
workbench = util.check_output('which wb_command')
workbench = workbench.strip()
except:
workbench = None
return workbench
def find_fsl():
"""
Returns the path of the fsl bin/ folder, or None if unavailable.
"""
# Check the FSLDIR environment variable first
shell_val = os.getenv('FSLDIR')
dir_fsl = os.path.join(shell_val, 'bin') if shell_val else ''
if os.path.exists(dir_fsl):
return dir_fsl
# If the env var method fails, fall back to using which. This method is
# not used first because sometimes the executable is installed separately
# from the rest of the fsl package, making it hard (or impossible) to locate
# fsl data files based on the returned path
try:
dir_fsl = util.check_output('which fsl')
dir_fsl = '/'.join(dir_fsl.split('/')[:-1])
except:
dir_fsl = None
return dir_fsl
def find_freesurfer():
"""
Returns the path of the freesurfer bin/ folder, or None if unavailable.
"""
try:
dir_freesurfer = util.check_output('which recon-all')
dir_freesurfer = '/'.join(dir_freesurfer.split('/')[:-1])
except:
dir_freesurfer = None
return dir_freesurfer
def find_msm():
try:
msm = util.check_output("which msm")
except:
# Return early so we do not call .replace() on None when msm is absent.
return None
return msm.replace(os.linesep, '')
def find_scene_templates():
"""
Returns the hcp scene templates path. If the shell variable
HCP_SCENE_TEMPLATES is set, uses that. Otherwise returns the defaults
stored in the ciftify/data/scene_templates folder.
"""
dir_hcp_templates = os.getenv('HCP_SCENE_TEMPLATES')
if dir_hcp_templates is None:
ciftify_path = os.path.dirname(__file__)
dir_hcp_templates = os.path.abspath(os.path.join(find_ciftify_global(),
'scene_templates'))
return dir_hcp_templates
def find_ciftify_global():
"""
Returns the path to ciftify required config and support files. If the
shell variable CIFTIFY_DATA is set, uses that. Otherwise returns the
defaults stored in the ciftify/data folder.
"""
dir_templates = os.getenv('CIFTIFY_DATA')
if dir_templates is None:
ciftify_path = os.path.dirname(__file__)
dir_templates = os.path.abspath(os.path.join(ciftify_path, 'data'))
return dir_templates
def find_HCP_S900_GroupAvg():
"""return path to HCP_S900_GroupAvg which should be in ciftify"""
s900 = os.path.join(find_ciftify_global(), 'HCP_S900_GroupAvg_v1')
return s900
def find_freesurfer_data():
"""
Returns the freesurfer data path defined in the environment.
"""
try:
dir_freesurfer_data = os.getenv('SUBJECTS_DIR')
except:
dir_freesurfer_data = None
return dir_freesurfer_data
def find_hcp_data():
"""
Returns the freesurfer data path defined in the environment.
"""
try:
dir_hcp_data = os.getenv('HCP_DATA')
except:
dir_hcp_data = None
return dir_hcp_data
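# Illustrative only, not part of ciftify: the lookup helpers above can be
# combined into a quick environment report, since each returns a path or None
# when the tool cannot be located:
#     print('workbench  :', find_workbench())
#     print('FSL        :', find_fsl())
#     print('freesurfer :', find_freesurfer())
#     print('HCP_DATA   :', find_hcp_data())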
def wb_command_version():
'''
Returns version info about wb_command.
Will raise an error if wb_command is not found, since the scripts that use
this depend heavily on wb_command and should crash anyway in such
an unexpected situation.
'''
wb_path = find_workbench()
if wb_path is None:
raise EnvironmentError("wb_command not found. Please check that it is "
"installed.")
wb_help = util.check_output('wb_command')
wb_version = wb_help.split(os.linesep)[0:3]
sep = '{} '.format(os.linesep)
wb_v = sep.join(wb_version)
all_info = 'wb_command: {}Path: {} {}'.format(sep,wb_path,wb_v)
return(all_info)
def freesurfer_version():
'''
Returns version info for freesurfer
'''
fs_path = find_freesurfer()
if fs_path is None:
raise EnvironmentError("Freesurfer cannot be found. Please check that "
"it is installed.")
try:
fs_buildstamp = os.path.join(os.path.dirname(fs_path),
'build-stamp.txt')
with open(fs_buildstamp, "r") as text_file:
bstamp = text_file.read()
except:
return "freesurfer build information not found."
bstamp = bstamp.replace(os.linesep,'')
info = "free
|
surfer:{0}Path: {1}{0}Build Stamp: {2}".format(
'{} '.format(os.linesep),fs_path, bstamp)
return info
def fsl_version():
'''
Returns version info for FSL
'''
fsl_path = find_fsl()
if fsl_path is None:
raise EnvironmentError("FSL not found. Please check that it is "
"installed")
try:
fsl_buildstamp = os.path.join(os.path.dirname(fsl_path), 'etc',
'fslversion')
with open(fsl_buildstamp, "r") as text_file:
bstamp = text_file.read()
except:
return "FSL build information not found."
bstamp = bstamp.replace(os.linesep,'')
info = "FSL:{0}Path: {1}{0}Version: {2}".format('{} '.format(os.linesep),
fsl_path, bstamp)
return info
def msm_version():
'''
Returns version info for msm
'''
msm_path = find_msm()
if not msm_path:
return "MSM not found."
try:
version = util.check_output('msm --version').replace(os.linesep, '')
except:
version = ''
info = "MSM:{0}Path: {1}{0}Version: {2}".format('{} '.format(os.linesep),
msm_path, version)
return info
def ciftify_version(file_name=None):
'''
Returns the path and the latest git commit number and date if working from
a git repo, or the version number if working with an installed copy.
'''
logger = logging.getLogger(__name__)
try:
version = pkg_resources.get_distribution('ciftify').version
except pkg_resources.DistributionNotFound:
# Ciftify not installed, but a git repo, so return commit info
pass
else:
return "Ciftify version {}".format(version)
try:
dir_ciftify = util.check_output('which {}'.format(file_name))
except subprocess.CalledProcessError:
file_name = None
dir_ciftify = __file__
ciftify_path = os.path.dirname(dir_ciftify)
git_log = get_git_log(ciftify_path)
if not git_log:
logger.error("Something went wrong while retrieving git log. Returning "
"ciftify path only.")
return "Ciftify:{0}Path: {1}".format(os.linesep, ciftify_path)
commit_num, commit_date = read_commit(git_log)
info = "Ciftify:{0}Path: {1}{0}{2}{0}{3}".format('{} '.format(os.linesep),
ciftify_path, commit_num, commit_date)
if not file_name:
return info
## Try to return the file_name's git commit too, if a file was given
file_log = get_git_log(ciftify_path, file_name)
if not file_log:
# File commit info not found
return info
commit_num, commit_date = read_commit(file_log)
info = "{1}{5}Last commit for {2}:{0}{3}{0}{4}".format('{} '.format(
os.linesep), info, file_name, commit_num,
commit_date, os.linesep)
return info
def get_git_log(git_dir, file_name=None):
git_cmd = ["cd {}; git log".format(git_dir)]
if file_name:
git_cmd.append("--follow {}".format(file_name))
git_cmd.append("| head")
git_cmd = " ".join(git_cmd)
# Silence stderr
try:
with open(os.devnull, 'w') as DEVNULL:
file_log = util.check_output(git_cmd, stderr=DEVNULL)
except subprocess.CalledProcessError:
# Fail safe in git command returns non-zero value
logger = logging.getLogger(__name__)
logger.error("Unrecognized command: {} "
"\nReturning empty git log.".format(git_cmd))
file_log = ""
return file_log
def r
|
jdemel/gnuradio
|
gr-digital/python/digital/qa_linear_equalizer.py
|
Python
|
gpl-3.0
| 5,126
| 0.013461
|
#!/usr/bin/env python
#
# Copyright 2020 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest
import random, numpy
from gnuradio import digital, blocks, channels
class qa_linear_equalizer(gr_unittest.TestCase):
def unpack_values(self, values_in, bits_per_value, bits_per_symbol):
# verify that bits_per_value is divisible by bits_per_symbol
m = bits_per_value / bits_per_symbol
# print(m)
mask = 2**(bits_per_symbol)-1
if bits_per_value != m*bits_per_symbol:
print("error - bits per symbols must fit nicely into bits_per_value bit values")
return []
num_values = len(values_in)
num_symbols = int(num_values*( m) )
cur_byte = 0
cur_bit = 0
out = []
for i in range(num_symbols):
s = (values_in[cur_byte] >> (bits_per_value-bits_per_symbol-cur_bit)) & mask
out.append(s)
cur_bit += bits_per_symbol
if cur_bit >= bits_per_value:
cur_bit = 0
cur_byte += 1
return out
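# Worked example (illustrative, not in the original source):
# unpack_values([0x27], 8, 2) walks the byte 0b00100111 two bits at a time
# starting from the most significant bit and returns [0, 2, 1, 3].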
def map_symbols_to_constellation(self, symbols, cons):
l = list(map(lambda x: cons.points()[x], symbols))
return l
def setUp(self):
random.seed(987654)
self.tb = gr.top_block()
self.num_data = num_data = 10000
self.sps = sps = 4
self.eb = eb = 0.35
self.preamble = preamble = [0x27,0x2F,0x18,0x5D,0x5B,0x2A,0x3F,0x71,0x63,0x3C,0x17,0x0C,0x0A,0x41,0xD6,0x1F,0x4C,0x23,0x65,0x68,0xED,0x1C,0x77,0xA7,0x0E,0x0A,0x9E,0x47,0x82,0xA4,0x57,0x24,]
self.payload_size = payload_size = 300 # bytes
self.data = data = [0]*4+[random.getrandbits(8) for i in range(payload_size)]
self.gain = gain = .001 # LMS gain
self.corr_thresh = corr_thresh = 3e6
self.num_taps = num_taps = 16
def tearDown(self):
self.tb = None
def transform(self, src_data, gain, const):
SRC = blocks.vector_source_c(src_data, False)
EQU = digital.lms_dd_equalizer_cc(4, gain, 1, const.base())
DST = blocks.vector_sink_c()
self.tb.connect(SRC, EQU, DST)
self.tb.run()
return DST.data()
def test_001_identity(self):
# Constant modulus signal so no adjustments
const = digital.constellation_qpsk()
src_data = const.points()*1000
N = 100 # settling time
expected_data = src_data[N:]
result = self.transform(src_data, 0.1, const)[N:]
N = -500
self.assertComplexTuplesAlmostEqual(expected_data[N:], result[N:], 5)
def test_qpsk_3tap_lms_training(self):
# set up fg
gain = 0.01 # LMS gain
num_taps = 16
num_samp = 2000
num_test = 500
cons = digital.constellation_qpsk().base()
rxmod = digital.generic_mod(cons, False, self.sps, True, self.eb, False, False)
modulated_sync_word_pre = digital.modulate_vector_bc(rxmod.to_basic_block(), self.preamble+self.preamble, [1])
modulated_sync_word = modulated_sync_word_pre[86:(512+86)] # compensate for the RRC filter delay
corr_max = numpy.abs(numpy.dot(modulated_sync_word,numpy.conj(modulated_sync_word)))
corr_calc = self.corr_thresh/(corr_max*corr_max)
preamble_symbols = self.map_symbols_to_constellation(self.unpack_values(self.preamble, 8, 2), cons)
alg = digital.adaptive_algorithm_lms(cons, gain).base()
evm = digital.meas_evm_cc(cons, digital.evm_measurement_t.EVM_PERCENT)
leq = digital.linear_equalizer(num_taps, self.sps, alg, False, preamble_symbols, 'corr_est')
correst = digital.corr_est_cc(modulated_sync_word, self.sps, 12, corr_calc, digital.THRESHOLD_ABSOLUTE)
constmod = digital.generic_mod(
constellation=cons,
differential=False,
samples_per_symbol=4,
pre_diff_code=True,
excess_bw=0.35,
verbose=False,
log=False)
chan = channels.channel_model(
noise_voltage=0.0,
frequency_offset=0.0,
epsilon=1.0,
taps=(1.0 + 1.0j, 0.63-.22j, -.1+.07j),
noise_seed=0,
block_tags=False)
vso = blocks.vector_source_b(self.preamble+self.data, True, 1, [])
head = blocks.head(gr.sizeof_float*1, num_samp)
vsi = blocks.vector_sink_f()
self.tb.connect(vso, constmod, chan, correst, leq, evm, head, vsi)
self.tb.run()
# look at the last 1000 samples, should converge quickly, below 5% EVM
upper_bound = list(20.0*numpy.ones((num_test,)))
lower_bound = list(0.0*numpy.zeros((num_test,)))
output_data = vsi.data()
output_data = output_data[-num_test:]
self.assertLess(output_data, upper_bound)
self.assertGreater(output_data, lower_bound)
if __name__ == '__main__':
gr_unittest.run(qa_linear_equalizer)
|
isb-cgc/ISB-CGC-Webapp
|
scripts/isb_auth.py
|
Python
|
apache-2.0
| 4,266
| 0.003516
|
#
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authenticates user for accessing the ISB-CGC Endpoint APIs.
#
# May be run from the command line or in scripts/ipython.
#
# The credentials file can be copied to any machine from which you want
# to access the API.
#
# 1. Command Line
# python ./isb_auth.py saves the user's credentials;
# OPTIONAL:
# -v for verbose (returns token!)
# -s FILE sets credentials file [default: ~/.isb_credentials]
# -u URL-only: for use over terminal connections;
# gives user a URL to paste into their browser,
# and asks for an auth code in return
#
# 2. Python
# import isb_auth
# isb_auth.get_credentials()
#
# # optional: to store credentials in a different location
# from oauth2client.file import Storage
# import isb_auth
# import os
#
# storage_file = os.path.join(os.path.expanduser("~"), "{USER_CREDENTIALS_FILE_NAME}")
# storage = Storage(storage_file)
# isb_auth.get_credentials(storage=storage)
#
from __future__ import print_function
from argparse import ArgumentParser
import os
from oauth2client.client import OAuth2WebServerFlow
from oauth2client import tools
from oauth2client.file import Storage
VERBOSE = False
# for native application - same as settings.INSTALLED_APP_CLIENT_ID
CLIENT_ID = '586186890913-atr969tu3lf7u574khjjplb45fgpq1bg.apps.googleusercontent.com'
# NOTE: this is NOT actually a 'secret' -- we're using the 'installed
# application' OAuth pattern here
CLIENT_SECRET = 'XeBxiK7NQ0yvAkAnRIKufkFE'
EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
DEFAULT_STORAGE_FILE = os.path.join(os.path.expanduser("~"), '.isb_credentials')
def maybe_print(msg):
if VERBOSE:
print(msg)
def get_credentials(storage=None, oauth_flow_args=[]):
noweb = '--noauth_local_webserver'
if __name__ != '__main__' and noweb not in oauth_flow_args:
oauth_flow_args.append(noweb)
if storage is None:
storage = Storage(DEFAULT_STORAGE_FILE)
credentials = storage.get()
if not credentials or credentials.invalid:
maybe_print('credentials missing/invalid, kicking off OAuth flow')
flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, EMAIL_SCOPE)
flow.auth_uri = flow.auth_uri.rstrip('/') + '?approval_prompt=force'
credentials = tools.run_flow(flow, storage, tools.argparser.parse_args(oauth_flow_args))
return credentials
def main():
global VERBOSE
args = parse_args()
oauth_flow_args = [args.noauth_local_webserver] if args.noauth_local_webserver else []
VERBOSE = args.verbose
maybe_print('--verbose: printing extra information')
storage = Storage(args.storage_file)
credentials = get_credentials(storage, oauth_flow_args)
maybe_print('credentials stored in ' + args.storage_file)
maybe_print('access_token: ' + credentials.access_token)
maybe_print('refresh_token: ' + credentials.refresh_token)
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'--storage_file', '-s', default=DEFAULT_STORAGE_FILE, help='storage file to use for the credentials (default is {})'.format(DEFAULT_STORAGE_FILE))
parser.add_argument('--verbose', '-v', dest='verbose', action='store_true', help='display credentials storage location, access token, and refresh token')
parser.set_defaults(verbose=False)
parser.add_argument('--noauth_local_webserver','-u', action='store_const', const='--noauth_local_webserver')
return parser.parse_args()
if __name__ == '__main__':
main()
|
ClemsonSoCUnix/django-sshkey
|
django_sshkey/south_migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 5,536
| 0.008309
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'UserKey'
db.create_table('sshkey_userkey', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=50, blank=True)),
('key', self.gf('django.db.models.fields.TextField')(max_length=2000)),
('fingerprint', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=47, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('last_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('django_sshkey', ['UserKey'])
# Adding unique constraint on 'UserKey', fields ['user', 'name']
db.create_unique('sshkey_userkey', ['user_id', 'name'])
def backwards(self, orm):
# Removing unique constraint on 'UserKey', fields ['user', 'name']
db.delete_unique('sshkey_userkey', ['user_id', 'name'])
# Deleting model 'UserKey'
db.delete_table('sshkey_userkey')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'django_sshkey.userkey': {
'Meta': {'unique_together': "[('user', 'name')]", 'object_name': 'UserKey', 'db_table': "'sshkey_userkey'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'fingerprint': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '47', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['django_sshkey']
|
obsh/tornado
|
tornado/websocket.py
|
Python
|
apache-2.0
| 41,216
| 0.000146
|
"""Implementation of the WebSocket protocol.
`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
communication between the browser and server.
WebSockets are supported in the current versions of all major browsers,
although older versions that do not support WebSockets are still in use
(refer to http://caniuse.com/websockets for details).
This module implements the final version of the WebSocket protocol as
defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_. Certain
browser versions (notably Safari 5.x) implemented an earlier draft of
the protocol (known as "draft 76") and are not compatible with this module.
.. versionchanged:: 4.0
Removed support for the draft 76 protocol version.
"""
from __future__ import absolute_import, division, print_function, with_statement
# Author: Jacob Kristhammar, 2010
import base64
import collections
import hashlib
import os
import struct
import tornado.escape
import tornado.web
import zlib
from tornado.concurrent import TracebackFuture
from tornado.escape import utf8, native_str, to_unicode
from tornado import httpclient, httputil
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.log import gen_log, app_log
from tornado import simple_httpclient
from tornado.tcpclient import TCPClient
from tornado.util import _websocket_mask, PY3
if PY3:
from urllib.parse import urlparse  # py3
xrange = range
else:
from urlparse import urlparse  # py2
class WebSocketError(Exception):
pass
class WebSocketClosedError(WebSocketError):
"""Raised by operations on a closed connection.
.. versionadded:: 3.2
"""
pass
class WebSocketHandler(tornado.web.RequestHandler):
"""Subclass this class to create a basic WebSocket handler.
Override `on_message` to handle incoming messages, and use
`write_message` to send messages to the client. You can also
override `open` and `on_close` to handle opened and closed
connections.
See http://dev.w3.org/html5/websockets/ for details on the
JavaScript interface. The protocol is specified at
http://tools.ietf.org/html/rfc6455.
Here is an example WebSocket handler that echoes all received messages
back to the client:
.. testcode::
class EchoWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
print("WebSocket opened")
def on_message(self, message):
self.write_message(u"You said: " + message)
def on_close(self):
print("WebSocket closed")
.. testoutput::
:hide:
WebSockets are not standard HTTP connections. The "handshake" is
HTTP, but after the handshake, the protocol is
message-based. Consequently, most of the Tornado HTTP facilities
are not available in handlers of this type. The only communication
methods available to you are `write_message()`, `ping()`, and
`close()`. Likewise, your request handler class should implement
`open()` method rather than ``get()`` or ``post()``.
If you map the handler above to ``/websocket`` in your application, you can
invoke it in JavaScript with::
var ws = new WebSocket("ws://localhost:8888/websocket");
ws.onopen = function() {
ws.send("Hello, world");
};
ws.onmessage = function (evt) {
alert(evt.data);
};
This script pops up an alert box that says "You said: Hello, world".
Web browsers allow any site to open a websocket connection to any other,
instead of using the same-origin policy that governs other network
access from javascript. This can be surprising and is a potential
security hole, so since Tornado 4.0 `WebSocketHandler` requires
applications that wish to receive cross-origin websockets to opt in
by overriding the `~WebSocketHandler.check_origin` method (see that
method's docs for details). Failure to do so is the most likely
cause of 403 errors when making a websocket connection.
When using a secure websocket connection (``wss://``) with a self-signed
certificate, the connection from a browser may fail because it wants
to show the "accept this certificate" dialog but has nowhere to show it.
You must first visit a regular HTML page using the same certificate
to accept it before the websocket connection will succeed.
"""
def __init__(self, application, request, **kwargs):
super(WebSocketHandler, self).__init__(application, request, **kwargs)
self.ws_connection = None
self.close_code = None
self.close_reason = None
self.stream = None
self._on_close_called = False
@tornado.web.asynchronous
def get(self, *args, **kwargs):
self.open_args = args
self.open_kwargs = kwargs
# Upgrade header should be present and should be equal to WebSocket
if self.request.headers.get("Upgrade", "").lower() != 'websocket':
self.clear()
self.set_status(400)
log_msg = "Can \"Upgrade\" only to \"WebSocket\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Connection header should be upgrade.
# Some proxy servers/load balancers
# might mess with it.
headers = self.request.headers
connection = map(lambda s: s.strip().lower(),
headers.get("Connection", "").split(","))
if 'upgrade' not in connection:
self.clear()
self.set_status(400)
log_msg = "\"Connection\" must be \"Upgrade\"."
self.finish(log_msg)
gen_log.debug(log_msg)
return
# Handle WebSocket Origin naming convention differences
# The difference between version 8 and 13 is that in 8 the
# client sends a "Sec-Websocket-Origin" header and in 13 it's
# simply "Origin".
if "Origin" in self.request.headers:
origin = self.request.headers.get("Origin")
else:
origin = self.request.headers.get("Sec-Websocket-Origin", None)
# If there was an origin header, check to make sure it matches
# according to check_origin. When the origin is None, we assume it
# did not come from a browser and that it can be passed on.
if origin is not None and not self.check_origin(origin):
self.clear()
self.set_status(403)
log_msg = "Cross origin websockets not allowed"
self.finish(log_msg)
gen_log.debug(log_msg)
return
self.stream = self.request.connection.detach()
self.stream.set_close_callback(self.on_connection_close)
self.ws_connection = self.get_websocket_protocol()
if self.ws_connection:
self.clear_header('Content-Type')
self.ws_connection.accept_connection()
else:
if not self.stream.closed():
self.stream.write(tornado.escape.utf8(
"HTTP/1.1 426 Upgrade Required\r\n"
"Sec-WebSocket-Version: 7, 8, 13\r\n\r\n"))
self.stream.close()
def write_message(self, message, binary=False):
"""Sends the given message to the client of this Web Socket.
The message may be either a string or a dict (which will be
encoded as json). If the ``binary`` argument is false, the
message will be sent as utf8; in binary mode any byte string
is allowed.
If the connection is already closed, raises `WebSocketClosedError`.
.. versionchanged:: 3.2
`WebSocketClosedError` was added (previously a closed connection
would raise an `AttributeError`)
.. versionchanged:: 4.3
Returns a `.Future` which can be used for flow control.
"""
if self.ws_connection is None:
raise WebSocketClosedError()
if isinstance(message, dict):
message = tornado.escape.json_encode(message)
return self.ws_connection.write_message(message, binary=binary)
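# Illustrative usage from a subclass (not part of the original source):
#     self.write_message({"status": "ok"})           # dict is JSON-encoded
#     self.write_message(b"\x00\x01", binary=True)   # raw bytes in binary mode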
def select_subprotocol(self, subp
|
pacoqueen/bbinn
|
formularios/trazabilidad_articulos.py
|
Python
|
gpl-2.0
| 71,269
| 0.007906
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado, #
# Diego Muñoz Escalante. #
# (pacoqueen@users.sourceforge.net, escalant3@users.sourceforge.net) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## trazabilidad_articulos.py - Traceability of rolls, bales and bigbags
###################################################################
## NOTES:
##
## ----------------------------------------------------------------
##
###################################################################
## Changelog:
## May 24, 2006 -> Started
## May 24, 2006 -> It's alive!
###################################################################
## DONE: Printing all of the information to a PDF would be the way to go.
###################################################################
from ventana import Ventana
import utils
import pygtk
pygtk.require('2.0')
import gtk, gtk.glade, time, sqlobject
import sys, os
try:
import pclases
except ImportError:
sys.path.append(os.path.join('..', 'framework'))
import pclases
import mx, mx.DateTime
sys.path.append(os.path.join('..', 'informes'))
from barcode import code39
from barcode.EANBarCode import EanBarCode
from reportlab.lib.units import cm
class TrazabilidadArticulos(Ventana):
def __init__(self, objeto = None, usuario = None):
self.usuario = usuario
Ventana.__init__(self, 'trazabilidad_articulos.glade', objeto,
self.usuario)
connections = {'b_salir/clicked': self.salir,
'b_buscar/clicked': self.buscar,
'b_imprimir/clicked': self.imprimir
}
self.add_connections(connections)
#self.wids['e_num'].connect("key_press_event", self.pasar_foco)
self.wids['ventana'].resize(800, 600)
self.wids['ventana'].set_position(gtk.WIN_POS_CENTER)
if objeto != None:
self.rellenar_datos(objeto)
self.wids['e_num'].grab_focus()
gtk.main()
def imprimir(self, boton):
"""
Dumps all of the on-screen information, unformatted, to a PDF.
"""
import informes, geninformes
datos = "Código de trazabilidad: %s\n\n"%self.wids['e_num'].get_text()
for desc, txt in (("Producto:\n", self.wids['txt_producto']),
("Lote/Partida:\n", self.wids['txt_lp']),
("Albarán de salida:\n", self.wids['txt_albaran']),
("Producción:\n", self.wids['txt_produccion'])):
buffer = txt.get_buffer()
texto = buffer.get_text(buffer.get_start_iter(),
buffer.get_end_iter())
datos += desc + texto + "\n\n"
informes.abrir_pdf(geninformes.trazabilidad(datos))
def pasar_foco(self, widget, event):
if event.keyval == 65293 or event.keyval == 65421:
self.wids['b_buscar'].grab_focus()
def chequear_cambios(self):
pass
def buscar_bigbag(self, txt):
ar = None
if isinstance(txt, str):
txt = utils.parse_numero(txt)
ars = pclases.Bigbag.select(pclases.Bigbag.q.numbigbag == txt)
if ars.count() == 1:
ar = ars[0]
elif ars.count() > 1:
filas = [(a.id, a.numbigbag, a.codigo) for a in ars]
idbigbag = utils.dialogo_resultado(filas,
titulo = "Seleccione bigbag",
cabeceras = ('ID', 'Número de bigbag', 'Código'),
padre = self.wids['ventana'])
if idbigbag > 0:
ar = pclases.Bigbag.get(idbigbag)
return ar
def buscar_bala(self, txt):
ar = None
if isinstance(txt, str):
txt = utils.parse_numero(txt)
ars = pclases.Bala.select(pclases.Bala.q.numbala == txt)
if ars.count() == 1:
ar = ars[0]
elif ars.count() > 1:
filas = [(a.id, a.numbala, a.codigo) for a in ars]
idbala = utils.dialogo_resultado(filas,
titulo = "Seleccione bala",
cabeceras = ('ID', 'Número de bala', 'Código'),
padre = self.wids['ventana'])
if idbala > 0:
ar = pclases.Bala.get(idbala)
return ar
def buscar_rollo(self, txt):
ar = None
if isinstance(txt, str):
txt = utils.parse_numero(txt)
ars = pclases.Rollo.select(pclases.Rollo.q.numrollo == txt)
if ars.count() == 1:
ar = ars[0]
elif ars.count() > 1:
filas = [(a.id, a.numrollo, a.codigo) for a in ars]
idrollo = utils.dialogo_resultado(filas,
titulo = "Seleccione rollo",
cabeceras = ('ID', 'Número de rollo', 'Código'),
padre = self.wids['ventana'])
if idrollo > 0:
ar = pclases.Rollo.get(idrollo)
return ar
def buscar_articulo(self, txt):
ar = None
if isinstance(txt, str):
txt = utils.parse_numero(txt)
ars = pclases.Rollo.select(pclases.Rollo.q.numrollo == txt)
if ars.count() == 0:
ar = self.buscar_bala(txt)
elif ars.count() == 1:
ar = ars[0]
else:
ar = self.buscar_rollo(txt)
return ar
def buscar(self, b):
a_buscar = self.wids['e_num'].get_text().strip().upper()
if a_buscar.startswith(pclases.PREFIJO_ROLLO):
try:
objeto = pclases.Rollo.select(
pclases.Rollo.q.codigo == a_buscar)[0]
except IndexError:
objeto = self.buscar_rollo(a_buscar[1:])
elif a_buscar.startswith(pclases.PREFIJO_BALA):
try:
objeto = pclases.Bala.select(
pclases.Bala.q.codigo == a_buscar)[0]
except IndexError:
objeto = self.buscar_bala(a_buscar[1:])
elif a_buscar.startswith(pclases.PREFIJO_LOTECEM):
try:
loteCem = pclases.LoteCem.select(
pclases.LoteCem.q.codigo == a_buscar)[0]
except IndexError:
utils.dialogo_info(titulo = "LOTE NO ENCONTRADO",
texto = "El lote de fibra de cemento %s no se encontró."
% (a_buscar),
padre = self.wids['ventana'])
loteCem = None
objeto = loteCem
elif a_buscar.startswith(pclases.PREFIJO_LOTE):
try:
|
pmaigutyak/mp-shop
|
product_images/fields.py
|
Python
|
isc
| 188
| 0
|
from images import fields
from product_images.models import ProductImage
class ImagesFormField(fields.ImagesFormField):
def __init__(self):
super().__init__(ProductImage)
|
Lucas-C/pre-commit
|
pre_commit/commands/install_uninstall.py
|
Python
|
mit
| 3,549
| 0
|
from __future__ import print_function
from __future__ import unicode_literals
import io
import os.path
import pipes
import sys
from pre_commit import output
from pre_commit.util import make_executable
from pre_commit.util import mkdirp
from pre_commit.util import resource_filename
# This is used to identify the hook file we install
PRIOR_HASHES = (
'4d9958c90bc262f47553e2c073f14cfe',
'd8ee923c46731b42cd95cc869add4062',
'49fd668cb42069aa1b6048464be5d395',
'79f09a650522a87b0da915d0d983b2de',
'e358c9dae00eac5d06b38dfdb1e33a8c',
)
CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'
def is_our_script(filename):
if not os.path.exists(filename):
return False
contents = io.open(filename).read()
return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)
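# Illustrative note, not from the original source: a hook file generated by
# pre-commit embeds CURRENT_HASH (or one of PRIOR_HASHES), so
# is_our_script('.git/hooks/pre-commit') returns True for our own hook and
# False for a hand-written one, which install() then preserves as
# pre-commit.legacy instead of overwriting it.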
def install(
runner, overwrite=False, hooks=False, hook_type='pre-commit',
skip_on_missing_conf=False,
):
"""Install the pre-commit hooks."""
hook_path = runner.get_hook_path(hook_type)
legacy_path = hook_path + '.legacy'
mkdirp(os.path.dirname(hook_path))
# If we have an existing hook, move it to pre-commit.legacy
if os.path.lexists(hook_path) and not is_our_script(hook_path):
os.rename(hook_path, legacy_path)
# If we specify overwrite, we simply delete the legacy file
if overwrite and os.path.exists(legacy_path):
os.remove(legacy_path)
elif os.path.exists(legacy_path):
output.write_line(
'Running in migration mode with existing hooks at {}\n'
'Use -f to use only pre-commit.'.format(
legacy_path,
),
)
with io.open(hook_path, 'w') as pre_commit_file_obj:
if hook_type == 'pre-push':
with io.open(resource_filename('pre-push-tmpl')) as f:
hook_specific_contents = f.read()
elif hook_type == 'commit-msg':
with io.open(resource_filename('commit-msg-tmpl')) as f:
hook_specific_contents = f.read()
elif hook_type == 'pre-commit':
hook_specific_contents = ''
else:
raise AssertionError('Unknown hook type: {}'.format(hook_type))
skip_on_missing_conf = 'true' if skip_on_missing_conf else 'false'
contents = io.open(resource_filename('hook-tmpl')).read().format(
sys_executable=pipes.quote(sys.executable),
hook_type=hook_type,
hook_specific=hook_specific_contents,
config_file=runner.config_file,
skip_on_missing_conf=skip_on_missing_conf,
)
pre_commit_file_obj.write(contents)
make_executable(hook_path)
output.write_line('pre-commit installed at {}'.format(hook_path))
# If they requested we install all of the hooks, do so.
if hooks:
install_hooks(runner)
return 0
def install_hooks(runner):
for repository in runner.repositories:
repository.require_installed()
def uninstall(runner, hook_type='pre-commit'):
"""Uninstall the pre-commit hooks."""
hook_path = runner.get_hook_path(hook_type)
legacy_path = hook_path + '.legacy'
# If our file doesn't exist or it isn't ours, gtfo.
if not os.path.exists(hook_path) or not is_our_script(hook_path):
return 0
os.remove(hook_path)
output.write_line('{} uninstalled'.format(hook_type))
if os.path.exists(legacy_path):
os.rename(legacy_path, hook_path)
output.write_line('Restored previous hooks to {}'.format(hook_path))
return 0
|
jackchi/interview-prep
|
leetcode/meeting_rooms.py
|
Python
|
mit
| 61
| 0.098361
|
intervals = [[10,20],[6,15],[0,22]]
print(sorted(intervals))
|
AdaHeads/Hosted-Telephone-Reception-System
|
use-cases/.patterns/adjust_recipients/test.py
|
Python
|
gpl-3.0
| 217
| 0.041667
|
self.Step (Message = "Receptionist-N ->> Klient-N [genvej: fokus-modtagerliste] (
|
måske)")
sel
|
f.Step (Message = "Receptionist-N ->> Klient-N [retter modtagerlisten]")
|
ahmadRagheb/goldenHR
|
erpnext/accounts/doctype/money_transfere/money_transfere.py
|
Python
|
gpl-3.0
| 4,294
| 0.034958
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils.data import flt, nowdate, getdate, cint
class MoneyTransfere(Document):
def on_submit(self):
self.validate_transfere()
def validate(self):
self.get_dummy_accounts()
def get_dummy_accounts(self):
dummy_to = frappe.db.get_values("Account", {"name": "حساب استلام من"+" - "+self.from_company + " - "+self.abbr_to,
"company": self.to_company,
"parent_account":"حساب استلام من"+" - "+self.abbr_to })
self.dummy_to=dummy_to[0][0]
dummy_from = frappe.db.get_values("Account", {"name": "حساب ارسال الي"+" - "+self.to_company + " - "+self.abbr,
"company": self.from_company,
"parent_account":"حساب ارسال"+" - "+self.abbr })
self.dummy_from=dummy_from[0][0]
def before_cancel(self):
pe = frappe.get_value("Payment Entry", filters = {"transfere_reference": self.name}, fieldname = "name")
if pe:
pe_doc = frappe.get_doc("Payment Entry", pe)
pe_doc.cancel()
je = frappe.get_value("Journal Entry Account", filters = {"reference_name": self.name}, fieldname = "parent")
if je:
je_doc = frappe.get_doc("Journal Entry", je)
je_doc.cancel()
def validate_transfere(self):
if self.from_company != self.to_company:
# sending_account = "حساب ارسال الى " + self.to_company
# receiving_account = "حساب استلام من " + self.from_company
# self.add_account_for_company(sending_account, self.to_company, "Liability")
# self.add_account_for_company(receiving_account, self.from_company, "Expense")
self.add_payment_entry(self.from_account, self.dummy_from, self.from_company)
self.add_journal_entry(self.to_account,self.dummy_to, self.to_company)
else:
self.add_payment_entry(self.from_account, self.to_account, self.from_company)
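# Summary note (illustrative, not part of the original source): a transfer
# between two different companies books a Payment Entry against the sending
# company's dummy "send to" account plus a Journal Entry against the receiving
# company's dummy "receive from" account, while a transfer within one company
# is a single Payment Entry from the source account to the destination account.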
def add_account_for_company(self, account, company, r_type):
pass
# pacc_name = ""
# if r_type == "Expense":
# pacc_name = "حساب ارسال - E"
# elif r_type == "Liability":
# pacc_name = "حساب استقبال - o"
# # if not frappe.db.exists("Account", pacc_name):
# # pacc = frappe.new_doc("Account")
# # pacc.account_name = pacc_name
# # pacc.root_type = r_type
# # pacc.is_group = 1
# # pacc.parent_account = ""
# # pacc.company = company
# # pacc.flags.ignore_validate = True
# # pacc.insert()
# if not frappe.db.exists("Account", account):
# acc = frappe.new_doc("Account")
# acc.account_name = account
# acc.company = company
# acc.parent_account = pacc_name
# acc.is_group = 0
# acc.insert()
def add_payment_entry(self, paid_from, paid_to, company):
pe = frappe.new_doc("
|
Payment Entry")
pe.payment_type = "Internal Transfer"
pe.company = company
pe.paid_from = paid_from
pe.paid_to = paid_to
pe.paid_amount = self.transfered_amount
pe.received_amount = self.transfered_amount
pe.posting_date = nowdate()
pe.mode_of_payment = self.mode_of_payment
pe.transfere_reference = self.name
pe.insert()
pe.submit()
# pe.setup_party_account_field()
# pe.set_missing_values()
# pe.set_exchange_rate()
# pe.set_amounts()
# self.assertEquals(pe.difference_amount, 500)
# pe.append("deductions", {
# "account": "_Test Exchange Gain/Loss - _TC",
# "cost_center": "_Test Cost Center - _TC",
# "amount": 500
# })
def add_journal_entry(self, account1, account2, company):
default_cost = frappe.get_value("Company", filters = {"name":company}, fieldname = "cost_center")
jv = frappe.new_doc("Journal Entry")
jv.posting_date = nowdate()
jv.company = company
jv.voucher_type = "Opening Entry"
jv.set("accounts", [
{
"account": account2,
"credit_in_account_currency": self.transfered_amount,
"cost_center": default_cost,
"reference_type": "Money Transfere",
"reference_name": self.name
}, {
"account": account1,
"debit_in_account_currency": self.transfered_amount,
"cost_center": default_cost,
"reference_type": "Money Transfere",
"reference_name": self.name
}
])
jv.insert()
jv.submit()
|
oscaro/django
|
tests/urlpatterns_reverse/urls.py
|
Python
|
bsd-3-clause
| 3,830
| 0.003394
|
from django.conf.urls import patterns, url, include
from .views import empty_view, empty_view_partial, empty_view_wrapped, absolute_kwargs_view
other_patterns = patterns('',
url(r'non_path_include/$', empty_view, name='non_path_include'),
url(r'nested_path/$', 'urlpatterns_reverse.views.nested_view'),
)
urlpatterns = patterns('',
url(r'^places/(\d+)/$', empty_view, name='places'),
url(r'^places?/$', empty_view, name="places?"),
url(r'^places+/$', empty_view, name="places+"),
url(r'^places*/$', empty_view, name="places*"),
url(r'^(?:places/)?$', empty_view, name="places2?"),
url(r'^(?:places/)+$', empty_view, name="places2+"),
url(r'^(?:places/)*$', empty_view, name="places2*"),
url(r'^places/(\d+|[a-z_]+)/', empty_view, name="places3"),
url(r'^places/(?P<id>\d+)/$', empty_view, name="places4"),
url(r'^people/(?P<name>\w+)/$', empty_view, name="people"),
url(r'^people/(?:name/)', empty_view, name="people2"),
url(r'^people/(?:name/(\w+)/)?', empty_view, name="people2a"),
url(r'^people/(?P<name>\w+)-(?P=name)/$', empty_view, name="people_backref"),
url(r'^optional/(?P<name>.*)/(?:.+/)?', empty_view, name="optional"),
url(r'^hardcoded/$', empty_view, name="hardcoded"),
url(r'^hardcoded/doc\.pdf$', empty_view, name="hardcoded2"),
url(r'^people/(?P<state>\w\w)/(?P<name>\w+)/$', empty_view, name="people3"),
url(r'^people/(?P<state>\w\w)/(?P<name>\d)/$', empty_view, name="people4"),
url(r'^people/((?P<state>\w\w)/test)?/(\w+)/$', empty_view, name="people6"),
url(r'^character_set/[abcdef0-9]/$', empty_view, name="range"),
url(r'^character_set/[\w]/$', empty_view, name="range2"),
url(r'^price/\$(\d+)/$', empty_view, name="price"),
url(r'^price/[$](\d+)/$', empty_view, name="price2"),
url(r'^price/[\$](\d+)/$', empty_view, name="price3"),
url(r'^product/(?P<product>\w+)\+\(\$(?P<price>\d+(\.\d+)?)\)/$', empty_view, name="product"),
url(r'^headlines/(?P<year>\d+)\.(?P<month>\d+)\.(?P<day>\d+)/$', empty_view, name="headlines"),
url(r'^windows_path/(?P<drive_name>[A-Z]):\\(?P<path>.+)/$', empty_view, name="windows"),
url(r'^special_chars/(?P<chars>.+)/$', empty_view, name="special"),
url(r'^(?P<name>.+)/\d+/$', empty_view, name="mixed"),
url(r'^repeats/a{1,2}/$', empty_view, name="repeats"),
url(r'^repeats/a{2,4}/$', empty_view, name="repeats2"),
url(r'^repeats/a{2}/$', empty_view, name="repeats3"),
url(r'^(?i)CaseInsensitive/(\w+)', empty_view, name="insensitive"),
url(r'^test/1/?', empty_view, name="test"),
url(r'^(?i)test/2/?$', empty_view, name="test2"),
url(r'^outer/(?P<outer>\d+)/', include('urlpatterns_reverse.included_urls')),
url(r'^outer-no-kwargs/(\d+)/', include('urlpatterns_reverse.included_no_kwargs_urls')),
url('', include('urlpatterns_reverse.extra_urls')),
# This is non-reversible, but we shouldn't blow up when parsing it.
url(r'^(?:foo|bar)(\w+)/$', empty_view, name="disjunction"),
# Partials should be fine.
url(r'^partial/', empty_view_partial, name="partial"),
url(r'^partial_wrapped/', empty_view_wrapped, name="partial_wrapped"),
# Regression views for #9038. See tests for more details
url(r'arg_view/$', 'kwargs_view'),
url(r'arg_view/(?P<arg1>\d+)/$', 'kwargs_view'),
url(r'absolute_arg_view/(?P<arg1>\d+)/$', absolute_kwargs_view),
url(r'absolute_arg_view/$', absolute_kwargs_view),
# Tests for #13154. Mixed syntax to test both ways of defining URLs.
url(r'defaults_view1/(?P<arg1>\d+)/', 'defaults_view', {'arg2': 1}, name='defaults'),
(r'defaults_view2/(?P<arg1>\d+)/', 'defaults_view', {'arg2': 2}, 'defaults'),
url('^includes/', include(other_patterns)),
# Security tests
url('(.+)/security/$', empty_view, name='security'),
)
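# Illustrative only, not part of the test fixture: with this module as the
# active URLconf, Django's reverse() maps the names above back to paths, e.g.
#     reverse('places', args=[3])                  -> '/places/3/'
#     reverse('people', kwargs={'name': 'adrian'}) -> '/people/adrian/'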
|
jsquare/hikeplanner
|
src/settings.py
|
Python
|
gpl-2.0
| 5,672
| 0.001234
|
# Django settings for hikeplanner project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Jaime', 'jaime.m.mccandless@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
#'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'hikeplanner', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'postgres',
'PASSWORD': 'hiketime',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '^#zyha@)nr_z=#ge2esa0kc1+1f56tfa-nuox5%!^+hqgo%7w*'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.messages.context_processors.messages',
'django.contrib.auth.context_processors.auth',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'src.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'src.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
"hikes/templates"
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.gis',
'hikes',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
PaulWay/spacewalk
|
client/solaris/smartpm/smart/interfaces/up2date/interface.py
|
Python
|
gpl-2.0
| 19,239
| 0.001663
|
#
# Copyright (c) 2004 Conectiva, Inc.
# Copyright (c) 2005--2013 Red Hat, Inc.
#
# From code written by Gustavo Niemeyer <niemeyer@conectiva.com>
# Modified by Joel Martin <jmartin@redhat.com>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.interfaces.up2date.progress import Up2dateProgress
from smart.interfaces.up2date import rhnoptions
from smart.interface import Interface, getScreenWidth
from smart.util.strtools import sizeToStr, printColumns
from smart.const import OPTIONAL, ALWAYS, DEBUG
from smart.fetcher import Fetcher
from smart.report import Report
from smart import *
from smart.transaction import PolicyInstall
from rhn.client.rhnPackages import ServerSettings
from rhn.client import rhnAuth
import getpass
import sys
import os
import commands
class Up2dateInterface(Interface):
def __init__(self, ctrl):
Interface.__init__(self, ctrl)
self._progress = Up2dateProgress()
self._activestatus = False
def getPackages(self, reload=True):
if reload: self._ctrl.reloadChannels()
cache = self._ctrl.getCache()
pkgs = cache.getPackages()
return pkgs
def getRHNPackages(self, reload=True, latest=False):
if reload: self._ctrl.reloadChannels()
cache = self._ctrl.getCache()
pkgs = cache.getPackages()
retpkgs = []
patchlist = []
status, output = commands.getstatusoutput("showrev -p")
if status == 0:
if type(output) == type(""):
output = output.splitlines()
for line in output:
# Patch: 190001-01 Obsoletes: Requires: Incompatibles: Packages: Zpkg2, Zpkg1
if not line.startswith("Patch:"):
continue
parts = line.split()
patchlist.append("patch-solaris-" + parts[1])
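                # entries look like "patch-solaris-190001-01" (patch id taken from the showrev -p output)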
for pkg in pkgs:
if pkg.name.startswith("patch-solaris") and not pkg.installed:
matchname = pkg.name + "-" + pkg.version
for patchname in patchlist:
                    if matchname.startswith(patchname + "-"):
                        pkg.installed |= 1
            for loader in pkg.loaders:
                channel = loader.getChannel()
if channel.getType() == "solaris-rhn":
retpkgs.append(pkg)
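        # when latest is requested, keep only the newest available version of each package name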
if latest:
apkgs = []
for pkg in retpkgs:
found = False
for apkg in apkgs:
if pkg.name == apkg.name:
found = True
if pkg > apkg:
apkgs.remove(apkg)
apkgs.append(pkg)
break
if not found:
apkgs.append(pkg)
retpkgs = apkgs
return retpkgs
def run(self, command=None, argv=None):
# argv is the list of packages to install if any
#print "Up2date run() command: ", command, "pkgs: ", argv
action = command["action"]
if command.has_key("channel"):
rhnoptions.setOption("channel", command["channel"])
if command.has_key("global_zone"):
rhnoptions.setOption("global_zone", command["global_zone"])
if command.has_key("admin"):
rhnoptions.setOption("admin", command["admin"])
if command.has_key("response"):
rhnoptions.setOption("response", command["response"])
rhnoptions.setOption("action", action)
result = None
if action in ("", "installall"):
if action == "":
pkgs = argv
if action == "installall":
pkgs = self.getRHNPackages(latest=True)
pkgs = [str(x) for x in pkgs if not x.installed]
import smart.commands.install as install
opts = install.parse_options([])
opts.args = pkgs
opts.yes = True
# Use a custom policy for breaking ties with patches.
if command.has_key("act_native"):
result = install.main(self._ctrl, opts, RHNSolarisGreedyPolicyInstall)
else:
result = install.main(self._ctrl, opts, RHNSolarisPolicyInstall)
if action == "list":
pkgs = self.getRHNPackages()
print _("""
Name Version Rel
----------------------------------------------------------""")
for pkg in pkgs:
if pkg.installed: continue
found = False
for upgs in pkg.upgrades:
for prv in upgs.providedby:
for p in prv.packages:
if p.installed:
found = True
if found:
parts = pkg.version.split("-")
version = parts[0]
release = "-".join(parts[1:])
print "%-40s%-15s%-20s" % (pkg.name, version, release)
# bug 165383: run the packages command after an install
if action in ("", "installall", "packages"):
from rhn.client import rhnPackages
import string
pkglist = self.getPackages()
pkgs = []
#8/8/2005 wregglej 165046
#make sure patches get refreshed by checking to see if they're installed
#and placing them in the pkgs list.
patchlist = []
status, output = commands.getstatusoutput("showrev -p")
if status == 0:
if type(output) == type(""):
output = output.splitlines()
for line in output:
# Patch: 190001-01 Obsoletes: Requires: Incompatibles: Packages: Zpkg2, Zpkg1
if not line.startswith("Patch:"):
continue
parts = line.split()
patchlist.append("patch-solaris-" + parts[1] + "-1")
for pkg in pkglist:
if pkg.name.startswith("patch-solaris"):
matchname = pkg.name + "-" + pkg.version
for patchname in patchlist:
if string.find(matchname, patchname) > -1:
parts = string.split(pkg.version, "-")
version = parts[0]
revision = string.join(parts[1:]) or 1
arch = "sparc-solaris-patch"
pkgs.append((pkg.name, version, revision, "", arch))
elif pkg.installed:
# We won't be listing patch clusters: once installed
# they are just patches
if pkg.name.startswith("patch-solaris"):
arch = "sparc-solaris-patch"
else:
arch = "sparc-solaris"
parts = string.split(pkg.version, "-")
version = string.join(parts[0:-1], "-")
revision = parts[-1] or 1
# bug 164540: removed hard-coded '0' epoch
pkgs.append((pkg.name, version, revision, "", arch))
rhnPackages.refreshPackages(pkgs)
# FIXME (20050415): Proper output method
print "Package list refresh successful"
if action == "hardware":
from rhn
|
rubenk/cobbler
|
cobbler/server/xmlrpclib2.py
|
Python
|
gpl-2.0
| 9,102
| 0.002857
|
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2006 Anthony Liguori <aliguori@us.ibm.com>
# Copyright (C) 2006 XenSource Inc.
# Copyright (C) 2007 Red Hat Inc., Michael DeHaan <mdehaan@redhat.com>
#============================================================================
"""
An enhanced XML-RPC client/server interface for Python.
"""
import re
import fcntl
from types import *
import os
import errno
import traceback
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import SocketServer
import xmlrpclib, socket, os, stat
#import mkdir
#
# Convert all integers to strings as described in the Xen API
#
def stringify(value):
if isinstance(value, long) or \
(isinstance(value, int) and not isinstance(value, bool)):
return str(value)
elif isinstance(value, dict):
new_value = {}
for k, v in value.items():
new_value[stringify(k)] = stringify(v)
return new_value
elif isinstance(value, (tuple, list)):
return [stringify(v) for v in value]
else:
return value
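# Illustrative example: stringify({1: 2 ** 40, 'ok': True}) returns {'1': '1099511627776', 'ok': True}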
# We're forced to subclass the RequestHandler class so that we can work around
# some bugs in Keep-Alive handling and also to enable it by default
class XMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
protocol_version = "HTTP/1.1"
def __init__(self, request, client_address, server):
SimpleXMLRPCRequestHandler.__init__(self, request, client_address,
server)
# this is inspired by SimpleXMLRPCRequestHandler's do_POST but differs
# in a few non-trivial ways
# 1) we never generate internal server errors. We let the exception
# propagate so that it shows up in the Xend debug logs
# 2) we don't bother checking for a _dispatch function since we don't
# use one
def do_POST(self):
addrport = self.client_address
#if not connection.hostAllowed(addrport, self.hosts_allowed):
# self.connection.shutdown(1)
# return
data = self.rfile.read(int(self.headers["content-length"]))
rsp = self.server._marshaled_dispatch(data)
self.send_response(200)
self.send_header("Content-Type", "text/xml")
self.send_header("Content-Length", str(len(rsp)))
self.end_headers()
self.wfile.write(rsp)
self.wfile.flush()
#if self.close_connection == 1:
# self.connection.shutdown(1)
def parents(dir, perms, enforcePermissions = False):
"""
Ensure that the given directory exists, creating it if necessary, but not
complaining if it's already there.
@param dir The directory name.
@param perms One of the stat.S_ constants.
@param enforcePermissions Enforce our ownership and the given permissions,
even if the directory pre-existed with different ones.
"""
# Catch the exception here, rather than checking for the directory's
# existence first, to avoid races.
try:
os.makedirs(dir, perms)
except OSError, exn:
if exn.args[0] != errno.EEXIST or not os.path.isdir(dir):
raise
if enforcePermissions:
os.chown(dir, os.geteuid(), os.getegid())
os.chmod(dir, stat.S_IRWXU)
# This is a base XML-RPC server for TCP. It sets allow_reuse_address to
# true, and has an improved marshaller that logs and serializes exceptions.
class TCPXMLRPCServer(SocketServer.ThreadingMixIn, SimpleXMLRPCServer):
allow_reuse_address = True
def __init__(self, addr, requestHandler=None,
logRequests = 1):
if requestHandler is None:
requestHandler = XMLRPCRequestHandler
SimpleXMLRPCServer.__init__(self, addr,
(lambda x, y, z:
requestHandler(x, y, z)),
logRequests)
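        # mark the listening socket close-on-exec so it is not inherited by child processes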
flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
def get_request(self):
(client, addr) = SimpleXMLRPCServer.get_request(self)
flags = fcntl.fcntl(client.fileno(), fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(client.fileno(), fcntl.F_SETFD, flags)
return (client, addr)
def _marshaled_dispatch(self, data, dispatch_method = None):
params, method = xmlrpclib.loads(data)
if False:
# Enable this block of code to exit immediately without sending
# a response. This allows you to test client-side crash handling.
import sys
sys.exit(1)
try:
if dispatch_method is not None:
response = dispatch_method(method, params)
else:
response = self._dispatch(method, params)
if (response is None or
not isinstance(response, dict) or
'Status' not in response):
#log.exception('Internal error handling %s: Invalid result %s',
# method, response)
response = { "Status": "Failure",
"ErrorDescription":
['INTERNAL_ERROR',
'Invalid result %s handling %s' %
(response, method)]}
# With either Unicode or normal strings, we can only transmit
# \t, \n, \r, \u0020-\ud7ff, \ue000-\ufffd, and \u10000-\u10ffff
# in an XML document. xmlrpclib does not escape these values
# properly, and then breaks when it comes to parse the document.
# To hack around this problem, we use repr here and exec above
# to transmit the string using Python encoding.
# Thanks to David Mertz <mertz@gnosis.cx> for the trick (buried
# in xml_pickle.py).
if isinstance(response, StringTypes):
response = repr(response)[1:-1]
response = (response,)
response = xmlrpclib.dumps(response,
methodresponse=1,
allow_none=1)
except Exception, exn:
try:
#if self.xenapi:
# if _is_not_supported(exn):
# errdesc = ['MESSAGE_METHOD_UNKNOWN', method]
# else:
# #log.exception('Internal error handling %s', method)
# errdesc = ['INTERNAL_ERROR', str(exn)]
#
# response = xmlrpclib.dumps(
# ({ "Status": "Failure",
# "ErrorDescription": errdesc },),
# methodresponse = 1)
#else:
# import xen.xend.XendClient
if isinstance(exn, xmlrpclib.Fault):
response = xmlrpclib.dumps(exn)
else:
# log.exception('Internal error handling %s', method)
response = xmlrpclib.dumps(
xmlrpclib.Fault(101, str(exn)))
except Exception, exn2:
# FIXME
traceback.print_exc()
return response
notSupportedRE = re.compile(r'method "(.*)" is not supported')
def _is_not_supported(exn):
t
|
stev-0/bustimes.org.uk
|
busstops/management/commands/import_accessibility.py
|
Python
|
mpl-2.0
| 1,772
| 0.001693
|
import os
import zipfile
import csv
from django.core.management.base import BaseCommand
from django.db import transaction
from django.conf import settings
from ...models import Service
def get_service(row):
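    """Return a queryset of Services matching this row's TNDS service code, trying each region column in turn."""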
for region in 'EA', 'EM', 'WM', 'SE', 'SW':
col = 'TNDS-' + region
if row[col]:
return Service.objects.filter(region=region, service_code__endswith=''.join(row[col].split('-')[:-1]))
for region in 'S', 'Y', 'NE':
col = 'TNDS-' + region
if row[col]:
            return Service.objects.filter(region=region, service_code=row[col])
if row['TNDS-NW']:
        # use the NW region explicitly (the loop variable above would still be 'NE' at this point)
        return Service.objects.filter(region='NW', service_code__endswith=''.join(row['TNDS-NW'].split('_')[:-1]))
def handle_file(open_file):
for row in csv.DictReader(line.decode() for line in open_file):
service = get_service(row)
if service:
if row['HighFloor'] == 'LF':
low_floor = True
elif row['HighFloor'] == 'HF':
low_floor = False
else:
low_floor = None
service.update(wheelchair=row['Wheelchair Access'] == 'TRUE',
low_floor=low_floor,
assistance_service=row['Assistance Service'] == 'TRUE',
mobility_scooter=row['MobilityScooter'] == 'TRUE')
class Command(BaseCommand):
@transaction.atomic
def handle(self, *args, **options):
path = os.path.join(settings.DATA_DIR, 'accessibility-data.zip')
with zipfile.ZipFile(path) as archive:
for path in archive.namelist():
if 'IF145' in path:
with archive.open(path, 'r') as open_file:
handle_file(open_file)
|
vdloo/raptiformica-map
|
raptiformica_map/graph.py
|
Python
|
gpl-3.0
| 1,111
| 0
|
import re
CJDNS_IP_REGEX = re.compile(r'^fc[0-9a-f]{2}(:[0-9a-f]{4}){7}$', re.IGNORECASE)
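# e.g. "fc00:1234:5678:9abc:def0:1234:5678:9abc" matches; cjdns addresses always start with 0xfc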
class Node(object):
    def __init__(self, ip, version=None, label=None):
if not valid_cjdns_ip(ip):
raise ValueError('Invalid IP address')
if not valid_version(version):
raise ValueError('Invalid version')
self.ip = ip
self.version = int(version)
self.label = ip[-4:] or label
def __lt__(self, b):
return self.ip < b.ip
    def __repr__(self):
return 'Node(ip="%s", version=%s, label="%s")' % (
self.ip,
self.version,
self.label)
class Edge(object):
def __init__(self, a, b):
self.a, self.b = sorted([a, b])
def __eq__(self, that):
return self.a.ip == that.a.ip and self.b.ip == that.b.ip
def __repr__(self):
return 'Edge(a.ip="{}", b.ip="{}")'.format(self.a.ip, self.b.ip)
def valid_cjdns_ip(ip):
return CJDNS_IP_REGEX.match(ip)
def valid_version(version):
try:
return int(version) < 30
except ValueError:
return False
|
jairoserrano/simpledevops
|
models.py
|
Python
|
apache-2.0
| 425
| 0.002353
|
from app import db
class Pets(db.Model):
__tablename__ = 'pets'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), unique=True)
color = db.Column(db.String(30))
    pet = db.Column(db.String(10))
def __init__(self, name, color, pet):
self.name = name
self.color = color
        self.pet = pet
def __repr__(self):
return '<id {}>'.format(self.id)
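# Illustrative usage (sketch, assumes an application context and an existing 'pets' table):
#     db.session.add(Pets('Rex', 'brown', 'dog'))
#     db.session.commit()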
|
RobinJoe/Docs
|
doc/conf.py
|
Python
|
gpl-3.0
| 8,757
| 0
|
# -*- coding: utf-8 -*-
#
# Rackspace Developer Documentation documentation build configuration file,
# created by sphinx-quickstart on Thu Mar 6 14:14:55 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys
# import os
from pygments.lexers.web import PhpLexer
from sphinx.highlighting import lexers
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.ifconfig',
'sphinx.ext.todo',
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Document Repository'
copyright = '2016, Joseph Robinson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '1'
# The full version, including alpha/beta/rc tags.
# release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'samples', 'README.rst', 'common/*']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'document-repository'
# this will change the 'paragraph' character to '#'
html_add_permalinks = '#'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [('index', 'documentation-repository.tex',
'Documentation Repository',
'Joseph Robinson', 'manual')]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'documentation-repository',
'Documentation Repository', ['Joseph Robinson'], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [('index', 'documentation-repository',
|
George-Gate/CERN-Video-Downloader
|
PerformDownload.py
|
Python
|
mit
| 9,240
| 0.018831
|
import urllib.request
import os
'''If smallFileMode is set to True, the program will use the simpleDownload method, which may be faster for many small files but does not support break point resume.'''
smallFileMode=False
def showProgress(blockNum, blockSize, totalSize):
    '''Progress callback for urllib.request.urlretrieve;
    displays the download progress.'''
# display download progress
if blockNum*blockSize>=totalSize :
print ('\rDownload Finished!(%.2f'%round(blockNum*blockSize/2**20,2)
,'/','%.2f MB)'%round(totalSize/2**20,2))
else:
if totalSize!=-1 :
print ('\r','%.2f'%round(blockNum*blockSize/2**20,2),
'/','%.2f'%round(totalSize/2**20,2),
'MB ','%.2f'%round(blockNum*blockSize/totalSize*100,2),'%',end='')
else:
print ('\r','%.2f'%round(blockNum*blockSize/2**20,2),
'MB Downloaded.',end='')
# end {showProgress}
def recordToFile(record,msg):
'''Record msg to file, using binary mode'''
if len(msg.encode())>50:
while len(msg.encode())>47:
msg=msg[0:-int((len(msg.encode())-47)/2)-1]
msg=msg+'...'
written=record.write( '{: <50}'.format(msg).encode() )
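    # seek back so the next status message overwrites this fixed-width field in place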
record.seek(-written,1)
# end {recordToFile}
def checkDir(filename):
'''To check whether the parent of filename exists. If not, create it.
filename should be a string'''
from pathlib import Path
p=Path(filename)
try:
if not p.parent.exists():
p.parent.mkdir(parents=True)
except:
print ('Error occurred when creating Directory:',str(p.parent))
# end {checkDir}
def download(url, filename):
    ''' Download url to filename, supporting break point resume. '''
import errno
import socket
import time
import configparser
# get url info
urlHandler = urllib.request.urlopen( url,timeout=10 )
headers = urlHandler.info()
size = int(headers.get('Content-Length'))
lastmodified=headers.get('Last-Modified')
# access download info file
infoname=filename+'.lock'
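    # the .lock sidecar stores size, url and Last-Modified so a partial download can be validated before resuming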
info=configparser.ConfigParser()
if os.path.exists(infoname):
info.read(infoname)
try:
if (info.get('FileInfo','size')!=str(size) or
info.get('FileInfo','url')!=str(url) or
info.get('FileInfo','lastmodified')!=str(lastmodified)):
info.remove_section('FileInfo')
print('File changed, restart download.')
except:
info.remove_section('FileInfo')
print('.lock file damaged, restart download.')
# end if
# decide whether to resume or restart
if not info.has_section('FileInfo'):
info.add_section('FileInfo')
info.set('FileInfo','size',str(size))
info.set('FileInfo','url',str(url))
info.set('FileInfo','lastmodified',str(lastmodified))
with open(infoname,'w') as f:
info.write(f)
# delete existing file
open(filename,'wb').close()
# rebuild start point
try:
downloaded = os.path.getsize(filename )
except OSError:
downloaded = 0
startpoint = downloaded
# start download
connectionError=True
resetCounter=0
while connectionError and resetCounter<10:
connectionError=False
try:
if startpoint < size:
oneTimeSize = 65535 #64KB/time
urlHandler = urllib.request.Request(url)
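                # ask the server for only the remaining bytes so an interrupted download can resume where it left off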
urlHandler.add_header("Range", "bytes=%d-%d" % (startpoint, size))
urlHandler = urllib.request.urlopen(urlHandler,timeout=10)
data = urlHandler.read( oneTimeSize )
with open( filename, 'ab+' ) as filehandle:
while data:
filehandle.write( data )
downloaded += len( data )
showProgress(1, downloaded, size)
data = urlHandler.read( oneTimeSize )
# end if
except urllib.error.HTTPError as errinfo:
# HTTP Error
if errinfo.code==errno.ECONNRESET:
# Connection reset by peer, connect again
connectionError=True
resetCounter+=1
else:
raise
except urllib.error.URLError as errinfo:
# URL Error
if (isinstance(errinfo.reason,socket.gaierror) and
errinfo.reason.errno==-2):
                # Name or service not known, usually caused by a network outage or a wrong server address
connectionError=True
resetCounter+=1
time.sleep(10)
else:
raise
except socket.timeout:
# request timeout
connectionError=True
resetCounter+=1
# end try
# end while
# if resetCounter>10 and there is a connectionError then raise it
if connectionError:
raise Exception('Connection Error')
# check if download finished successfully
try:
downloaded = os.path.getsize(filename )
except OSError:
downloaded = 0
if downloaded==size:
# remove info file
os.remove(infoname)
return 'Success'
elif downloaded>size:
os.remove(infoname)
return 'The size of file downloaded is bigger than that on server.'
else:
return ('Download Not Finished! The size of file downloaded is smaller than that on server.'
' If this error continues, please try delete the file downloaded.')
#end {def download}
def simpleDownload(url, filename):
    '''Simple download method; does not support break point resume, but may be faster for small files'''
urllib.request.urlretrieve(url, filename, showProgress)
return 'Success'
# end {def simpleDownload}
#-----------------------------------------------------------------------------------------------------------------
# main procedure start from here
# get current working directory
#currentPath=os.path.dirname(performDownload)
#os.chdir(str(currentPath))
currentPath=os.getcwd()
# read download info from 'downloadList.csv'
with open('downloadList.csv') as f:
downloadlist=f.readlines()
onError=False # error flag
# open 'downloadList.csv' for status maintenance
with open('downloadList.csv','rb+') as record:
for item in downloadlist:
if item=='\n': continue
# parse item
status, localname, url = item[:-1].split(';')
status = status.strip(' ').upper()
# check status for downloading
if status in {'QUEUED','PAUSED',''}:
# record status
recordToFile(record,'Downloading')
# start download
print ('Begin to download',url)
print ('Save to:',localname)
checkDir(currentPath+localname[1:]) # check if parent dir exists
try:
if smallFileMode:
result=simpleDownload(url,
currentPath+localname[1:])
else:
                    result=download(url,
                                    currentPath+localname[1:])
# if download not success, raise exception
if result!='Success':
raise Exception(result)
except urllib.error.HTTPError as errinfo:
# 404 Not Found
print ('\r'+str(errinfo))
                recordToFile(record,str(errinfo))
except KeyboardInterrupt as errinfo:
# Ctrl+C Interrupt
print ('\rDownload Abort!'+20*' ')
if smallFileMode:
                    # reset status to 'Queued' since smallFileMode doesn't support break point resume
recordToFile(record,'Queued')
else:
# set status to 'Paused'
recordToFile(record,'Paused')
                onError=True
|
jkorell/PTVS
|
Python/Tests/TestData/DebuggerProject/A/module1.py
|
Python
|
apache-2.0
| 67
| 0
|
def do_something():
    for i in range(100):
        print(i)
|