| repo_name | path | language | license | size | score | prefix | middle | suffix |
|---|---|---|---|---|---|---|---|---|
| stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 | stringlengths 0-8.16k | stringlengths 3-512 | stringlengths 0-8.17k |

| sangh/LaserShow | pyglet-hg/experimental/swigtypes/parse.py | Python | bsd-3-clause | 12,636 | 0.002137 |

#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import gzip
import cPickle as marshal
import optparse
import os
import sys
import xml.sax
def parse_type(type_string):
'''Get a tuple of the type components for a SWIG-formatted type.
For example, given the type "p.f(p.struct _XExtData).int",
return ('int', ('f', ('struct _XExtData', 'p'),), 'p')
Qualifiers are ignored (removed).
'''
# Scan the type string left-to-right
buf = ''
stack = [()]
def flush(): # buf = flush()
if buf:
stack[-1] = stack[-1] + (buf,)
return ''
def push():
stack.append(())
def pop():
item = finalize(stack.pop())
if item is not None:
stack[-1] = stack[-1] + (item,)
def finalize(item):
assert type(item) is tuple
if not item:
# Empty tuple is dropped (empty param list)
return
elif item[0] == 'q':
# Discard qualifiers
return
# Reverse (puts pointers at end)
item = item[::-1]
# Reverse arguments of function
if item[-1] == 'f':
item = item[::-1]
# Empty out (void) param list
if item == ('f', ('void',)):
item = ('f',)
# Varargs encoding
elif item[-1] == 'v':
item = '...'
# Array encoding
elif item[-1] == 'a':
try:
item = ('a',) + tuple(int(j[0]) for j in item[-2::-1])
except (TypeError, ValueError):
# TODO arrays of dimension given by sizeof expression
item = ('a', 0)
# Remove one level of indirection for function types (CFUNCTYPE is
# already a pointer)
off = 0
for i, j in enumerate(item):
if type(j) is tuple and j and j[0] == 'f':
item = item[:i+1+off] + item[i+2+off:]
off -= 1
return item
for c in type_string:
if c == '.':
buf = flush()
elif c == '(':
push() # Push param list
buf = flush()
push() # Push item
elif c == ',':
buf = flush()
pop() # Pop item
push() # Push item
elif c == ')':
buf = flush()
pop() # Pop item
pop() # Pop param list
else:
buf += c
flush()
type_tuple = finalize(stack[0])
return type_tuple
class SwigInterfaceHandler(object):
def __init__(self):
self.name = None
self.cdecls = []
self.constants = []
def attribute(self, attrs):
if attrs['name'] == 'name':
self.name = str(attrs['value'])
def typemap(self, attrs):
return IgnoreElementHandler()
def cdecl(self, attrs):
handler = CDeclHandler(attrs)
self.cdecls.append(handler)
return handler
def constant(self, attrs):
handler = ConstantHandler(attrs)
self.constants.append(handler)
return handler
def class_(self, attrs):
handler = ClassHandler(attrs)
self.cdecls.append(handler)
return handler
def classforward(self, attrs):
handler = ClassForwardHandler(attrs)
self.cdecls.append(handler)
return handler
def enum(self, attrs):
handler = EnumHandler(attrs)
self.cdecls.append(handler)
return handler
def get_map(self):
map = {}
for cdecl in self.cdecls:
# ('typedef', type)
if cdecl.kind == 'typedef':
map[cdecl.name] = (cdecl.kind, cdecl.get_type(with_decl=True))
# ('enum', items)
elif cdecl.kind == 'enum':
enum = (cdecl.kind, cdecl.get_items())
map[cdecl.kind + ' ' + cdecl.name] = enum
map[cdecl.get_tdname()] = enum
# ('struct', variables)
# ('union', variables)
elif cdecl.kind in ('struct', 'union'):
class_ = (cdecl.kind, cdecl.get_variables())
map[cdecl.kind + ' ' + cdecl.name] = class_
map[cdecl.get_tdname()] = class_
# ('function', type)
elif cdecl.kind == 'function':
map[cdecl.name] = (cdecl.kind, cdecl.get_type(with_decl=True))
# ('variable', type)
elif cdecl.kind == 'variable':
map[cdecl.name] = (cdecl.kind, cdecl.get_type())
else:
assert False, (cdecl.kind, cdecl.type, cdecl.name)
# Constants: ('constant', value)
for constant in self.constants:
map[constant.name] = ('constant', constant.get_value())
import pprint
pprint.pprint(map)
return map
class IgnoreElementHandler(object):
pass
class ConstantHandler(object):
name = None
value = None
type = None
def __init__(self, attrs):
pass
def attribute(self, attrs):
name = attrs['name']
if name == 'name':
self.name = str(attrs['value'])
elif name == 'value':
self.value = str(attrs['value'])
elif name == 'type':
self.type = str(attrs['value'])
def get_value(self):
if self.type in ('int', 'long'):
# Yes, ugly and bad -- most C int constants can also be
# parsed as Python expressions; e.g. "1L << 8".
return int(eval(self.value))
return self.value
class EnumHandler(object):
name = None
tdname = None
kind = 'enum'
unnamed = False
def __init__(self, attrs):
self.items = []
def attribute(self, attrs):
name = attrs['name']
if name == 'name' and not self.unnamed:
self.name = str(attrs['value'])
elif name == 'unnamed':
self.name = str(attrs['value'])
self.unnamed = True
elif name == 'tdname':
self.tdname = str(attrs['value'])
def enumitem(self, attrs):
handler = EnumItemHandler(attrs)
self.items.append(handler)
return handler
def get_items(self):
items = []
index = 0
for item in self.items:
try:
# TODO parse enumvalueex properly
index = int(item.value)
except ValueError:
index += 1
items.append((item.name, index))
return tuple(items)
def get_tdname(self):
if self.tdname:
return self.tdname
else:
return self.name
class EnumItemHandler(object):
name = None
value = None
type = None
def __init__(self, attrs):
pass
def attribute(self, attrs):
name = attrs['name']
if name == 'name':
self.name = str(attrs['value'])
elif name == 'unnamed':
self.name = str(attrs['value'])
elif name == 'enumvalueex':
self.value = str(attrs['value'])
elif name == 'type':
self.type = str(attrs['value'])
def get_value(self):
if self.type in ('int', 'long'):
# Yes, ugly and bad -- most C int constants can also be
# parsed as Python expressions; e.g. "1L << 8".
return int(eval(self.value))
return self.value
class CDeclHandler(object):
name = None
kind = None
type = None
decl = ''
params = None
def __init__(self, attrs):
pass
def attribute(self, attrs):
name = attrs['name']
if name == 'name':
self.name = str(attrs['value'])
elif name == 'kind':
self.kind = str(attrs['value'])
elif name == 'type':
self.type = str(attrs['value'])
elif name == 'decl':
self.decl = str(attrs['value'])
def parmlist(self, attrs):
self.params = []
handler = ParmListHandler(attrs, self.params)
return handler
def get_params(self):
# (
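A minimal sketch of parse_type in use, taken straight from its docstring example (assumes the function defined at the top of this sample is in scope):

```python
# Qualifiers are dropped, pointers move to the end, and function
# argument lists are reversed, per the docstring.
result = parse_type("p.f(p.struct _XExtData).int")
assert result == ('int', ('f', ('struct _XExtData', 'p'),), 'p')
```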

| LTS5/connectomeviewer | cviewer/plugins/cff2/ui/cnetwork_tree_node.py | Python | bsd-3-clause | 2,792 | 0.011461 |

""" Specify the NetworkNode with its action, context-menus """
# Copyright (C) 2009-2010, Ecole Polytechnique Federale de Lausanne (EPFL) and
# University Hospital Center and University of Lausanne (UNIL-CHUV)
#
# Modified BSD License
# Standard library imports
import os
# Enthought library imports
from traits.api import Instance, Str, Any
from traitsui.api import TreeNode
from traitsui.menu import Menu, Action, Separator
# ConnectomeViewer imports
from cviewer.plugins.cff2.cnetwork import CNetwork
# Logging import
import logging
logger = logging.getLogger('root.'+__name__)
class CNetworkTreeNode(TreeNode):
# The object that contains the container ;^)
parent = Any
# the network associated with this node
node_for=[CNetwork]
# default icons
# Name of group item icon
icon_group = Str('home.png')
# Name of leaf item icon
icon_item=Str('home.png')
# Name of opened group item icon
icon_open=Str('home.png')
# labels
label='dname'
###
# Private Traits
# activate / deactivate logic
# if the node is activated, this means that there exists a
# corresponding RenderManager instance
_ShowName = Instance(Action,
kw={'name': 'Show name',
'action': 'object.show_name',
'tooltip': 'Shows the network name'}, )
_ChangeParameters = Instance(Action,
kw={'name': 'Edge Parameters',
'action': 'object._edge_parameters',
'tooltip': 'Thresholding and Change Attributes',
'enabled_when' : 'object.loaded == True'}, )
_RenderMatrixAction = Instance(Action,
kw={'name': 'Connectome Matrix Viewer',
'action': 'object.invoke_matrix_viewer',
'tooltip':'View the connectivity matrices',
'enabled_when':'object.loaded == True'}, )
# the menu shown after right-click
menu = Instance(Menu, transient=True)
def get_children(self, object):
""" Get the object's children. """
pass
# Collate the window's views into categories.
#return object.surfaces + object.volumes + object.tracks
######################################################################
# Non-public interface
######################################################################
def _menu_default(self):
""" Standard menus for network nodes """
menu_actions = []
return Menu( *menu_actions)
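For illustration, a hypothetical version of _menu_default that collates the Action traits declared above; the actual method in this snippet returns an empty menu.

```python
# Hypothetical sketch only, not the project's code: build the right-click
# menu from the Action instances defined on the class.
def _menu_default(self):
    menu_actions = [self._ShowName, Separator(),
                    self._ChangeParameters, self._RenderMatrixAction]
    return Menu(*menu_actions)
```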

| TomAugspurger/pandas | pandas/tests/arithmetic/test_datetime64.py | Python | bsd-3-clause | 90,010 | 0.000644 |

# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import datetime, time, timedelta
from itertools import product, starmap
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.compat.numpy import np_datetime64_compat
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray, TimedeltaArray
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(self, other, tz_naive_fixture):
# We don't parametrize this over box_with_array because listlike
# other plays poorly with assert_invalid_comparison reversed checks
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
assert_invalid_comparison(dta, other, tm.to_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], pd.Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
xbox = box if box is not pd.Index else np.ndarray
ts = pd.Timestamp.now(tz)
ser = pd.Series([ts, pd.NaT])
obj = tm.box_expected(ser, box)
expected = pd.Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[pd.Timestamp("2011-01-01"), NaT, pd.Timestamp("2011-01-03")],
[NaT, NaT, pd.Timestamp("2011-01-03")],
),
(
[pd.Timedelta("1 days"), NaT, pd.Timedelta("3 days")],
[NaT, NaT, pd.Timedelta("3 days")],
),
(
[pd.Period("2011-01", freq="M"), NaT, pd.Period("2011-03", freq="M")],
[NaT, NaT, pd.Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons(self, dtype, index_or_series, reverse, pair):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
# Series, Index
expected = Series([False, False, True])
tm.assert_series_equal(left == right, expected)
expected = Series([True, True, False])
tm.assert_series_equal(left != right, expected)
expected = Series([False, False, False])
tm.assert_series_equal(left < right, expected)
expected = Series([False, False, False])
tm.assert_series_equal(left > right, expected)
expected = Series([False, False, True])
tm.assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
tm.assert_series_equal(left <= right, expected)
def test_comparison_invalid(self, tz_naive_fixture, box_with_array):
# GH#4968
# invalid date/int comparisons
tz = tz_naive_fixture
ser = Series(range(5))
ser2 = Series(pd.date_range("20010101", periods=5, tz=tz))
ser = tm.box_expected(ser, box_with_array)
ser2 = tm.box_expected(ser2, box_with_array)
assert_invalid_comparison(ser, ser2, box_with_array)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
if box_with_array is tm.to_array and dtype is object:
# dont bother testing ndarray comparison methods as this fails
# on older numpys (since they check object identity)
return
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box_with_array)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
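The zero-dim unboxing that test_compare_zerodim exercises can be reproduced with public pandas API; a small sketch:

```python
# A 0-dim datetime64 ndarray is unboxed and compared elementwise.
import numpy as np
import pandas as pd

dti = pd.date_range("20130101", periods=3)
other = np.array(dti.to_numpy()[0])  # 0-dim array holding the first timestamp
print(dti <= other)                  # [ True False False]
```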

| 111pontes/ydk-py | core/ydk/services/ietf_netconf.py | Python | apache-2.0 | 60,135 | 0.01485 |

""" ietf_netconf
NETCONF Protocol Data Types and Protocol Operations.
Copyright (c) 2011 IETF Trust and the persons identified as
the document authors. All rights reserved.
Redistribution and use in source and binary forms, with or
without modification, is permitted pursuant to, and subject
to the license terms contained in, the Simplified BSD License
set forth in Section 4.c of the IETF Trust's Legal Provisions
Relating to IETF Documents
(http\://trustee.ietf.org/license\-info).
This version of this YANG module is part of RFC 6241; see
the RFC itself for full legal notices.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class EditOperationTypeEnum(Enum):
"""
EditOperationTypeEnum
NETCONF 'operation' attribute values
.. data:: merge = 0
The configuration data identified by the
element containing this attribute is merged
with the configuration at the corresponding
level in the configuration datastore identified
by the target parameter.
.. data:: replace = 1
The configuration data identified by the element
containing this attribute replaces any related
configuration in the configuration datastore
identified by the target parameter. If no such
configuration data exists in the configuration
datastore, it is created. Unlike a
<copy-config> operation, which replaces the
entire target configuration, only the configuration
actually present in the config parameter is affected.
.. data:: create = 2
The configuration data identified by the element
containing this attribute is added to the
configuration if and only if the configuration
data does not already exist in the configuration
datastore. If the configuration data exists, an
<rpc-error> element is returned with an
<error-tag> value of 'data-exists'.
.. data:: delete = 3
The configuration data identified by the element
containing this attribute is deleted from the
configuration if and only if the configuration
data currently exists in the configuration
datastore. If the configuration data does not
exist, an <rpc-error> element is returned with
an <error-tag> value of 'data-missing'.
.. data:: remove = 4
The configuration data identified by the element
containing this attribute is deleted from the
configuration if the configuration
data currently exists in the configuration
datastore. If the configuration data does not
exist, the 'remove' operation is silently ignored
by the server.
"""
merge = 0
replace = 1
create = 2
delete = 3
remove = 4
@staticmethod
def _meta_info():
from ydk.services._meta import _ietf_netconf as meta
return meta._meta_table['EditOperationTypeEnum']
class ErrorSeverityTypeEnum(Enum):
"""
ErrorSeverityTypeEnum
NETCONF Error Severity
.. data:: error = 0
Error severity
.. data:: warning = 1
Warning severity
"""
error = 0
warning = 1
@staticmethod
def _meta_info():
from ydk.services._meta import _ietf_netconf as meta
return meta._meta_table['ErrorSeverityTypeEnum']
class ErrorTagTypeEnum(Enum):
"""
ErrorTagTypeEnum
NETCONF Error Tag
.. data:: in_use = 0
The request requires a resource that
already is in use.
.. data:: invalid_value = 1
The request specifies an unacceptable value for one
or more parameters.
.. data:: too_big = 2
The request or response (that would be generated) is
too large for the implementation to handle.
.. data:: missing_attribute = 3
An expected attribute is missing.
.. data:: bad_attribute = 4
An attribute value is not correct; e.g., wrong type,
out of range, pattern mismatch.
.. data:: unknown_attribute = 5
An unexpected attribute is present.
.. data:: missing_element = 6
An expected element is missing.
.. data:: bad_element = 7
An element value is not correct; e.g., wrong type,
out of range, pattern mismatch.
.. data:: unknown_element = 8
An unexpected element is present.
.. data:: unknown_namespace = 9
An unexpected namespace is present.
.. data:: access_denied = 10
Access to the requested protocol operation or
data model is denied because authorization failed.
.. data:: lock_denied = 11
Access to the requested lock is denied because the
lock is currently held by another entity.
.. data:: resource_denied = 12
Request could not be completed because of
insufficient resources.
.. data:: rollback_failed = 13
Request to roll back some configuration change (via
rollback-on-error or <discard-changes> operations)
was not completed for some reason.
.. data:: data_exists = 14
Request could not be completed because the relevant
data model content already exists. For example,
a 'create' operation was attempted on data that
already exists.
.. data:: data_missing = 15
Request could not be completed because the relevant
data model content does not exist. For example,
a 'delete' operation was attempted on
data that does not exist.
.. data:: operation_not_supported = 16
Request could not be completed because the requested
operation is not supported by this implementation.
.. data:: operation_failed = 17
Request could not be completed because the requested
operation failed for some reason not covered by
any other error condition.
.. data:: partial_operation = 18
This error-tag is obsolete, and SHOULD NOT be sent
by servers conforming to this document.
.. data:: malformed_message = 19
A message could not be handled because it failed to
be parsed correctly. For example, the message is not
well-formed XML or it uses an invalid character set.
"""
in_use = 0
invalid_value = 1
too_big = 2
missing_attribute = 3
bad_attribute = 4
unknown_attribute = 5
missing_element = 6
bad_element = 7
unknown_element = 8
unknown_namespace = 9
access_denied = 10
lock_denied = 11
resource_denied = 12
rollback_failed = 13
data_exists = 14
data_missing = 15
operation_not_supported = 16
operation_failed = 17
partial_operation = 18
malformed_message = 19
@staticmethod
def _meta_info():
from ydk.services._meta import _ietf_netconf as meta
return meta._meta_table['ErrorTagTypeEnum']
class GetConfigRpc(object):
"""
Retrieve all or part of a specified configuration.
.. attribute:: input
**type**\: :py:class:`Input <ietf_netconf.GetConfigRpc.Input>`
.. attribute:: output
**type**\: :py:class:`Output <ietf_netconf.GetConfigRpc.Output>`
"""
_prefix = 'nc'
_revision = '2011-06-01'
def __init__(self):
self.input = GetConfigRpc.Input()
self.input.parent = self
self.output = GetConfigRpc.Output()
self.output.parent = self
self.is_rpc = True
class Input(object):
"""
.. attribute:: filter
Subtree or XPath filter to use
**type**\: anyxml
.. attribute:: source
Particular configuration to retrieve
**type**\: :py:class:`Source <ietf_netconf.GetConfigRpc.Input.Source>`
.. attribute:: with_defaults
The explicit defaults processing mode requested
**type**\: :py:class:`WithDefaultsModeEnum <ietf_netconf_with_defaults.WithDefaultsModeEnum>`
"""
_prefix = 'nc'
_revision = '2011-06-01'
def __init__(self):
self.parent = None
se
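A small sketch of consuming the enums defined above; these are plain enum.Enum members with the declared values:

```python
# Look up members by name or by value.
op = EditOperationTypeEnum.merge
assert op.value == 0
assert ErrorTagTypeEnum(15) is ErrorTagTypeEnum.data_missing
```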

| ooblog/TSF1KEV | TSFpy/debug/sample_help.py | Python | mit | 2,181 | 0.024759 |

#! /usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import division,print_function,absolute_import,unicode_literals
import sys
import os
os.chdir(sys.path[0])
sys.path.append('/mnt/sda2/github/TSF1KEV/TSFpy')
from TSF_io import *
#from TSF_Forth import *
from TSF_shuffle import *
from TSF_match import *
from TSF_calc import *
from TSF_time import *
TSF_Forth_init(TSF_io_argvs(),[TSF_shuffle_Initwords,TSF_match_Initwords,TSF_calc_Initwords,TSF_time_Initwords])
TSF_Forth_setTSF("TSF_Tab-Separated-Forth:",
"\t".join(["UTF-8","#TSF_encoding","replace:","#TSF_this","help:","#TSF_echothe","0","#TSF_fin."]),
TSF_style="T")
TSF_Forth_setTSF("help:",
"\t".join(["usage: ./TSF.py [command|file.tsf] [argv] ...",
"commands:",
" --help this commands view",
" --about about TSF UTF-8 text (Japanese) view\" ",
" --python TSF.tsf to Python.py view or save\" ",
" --helloworld \"Hello world 1 #TSF_echoN\" sample",
" --quine TSF_Forth_viewthey() Quine (self source) sample",
" --99beer 99 Bottles
|
of Beer sample",
" --fizzbuzz ([0]#3Z1~0)+([0]#5Z2~0) Fizz Buzz Fizz&Buzz sample",
" --zundoko Zun Zun Zun Zun Doko VeronCho sample",
" --fibonacci Fibonacci number 0,1,1,2,3,5,8,13,21,55... sample",
" --prime prime numbers 2,3,5,7,11,13,17,19,23,29... sample",
" --calcFX fractions calculator \"1/3-m1|2\"-> p5|6 sample",
" --calcDC fractions calculator \"1/3-m1|2\"-> 0.8333... sample",
" --calcKN fractions calculator \"1/3-m1|2\"-> 6 bunno 5 sample",
" --calender \"@000y@0m@0dm@wdec@0h@0n@0s\"-> TSF_time_getdaytime() sample"]),
TSF_style="N")
TSF_Forth_setTSF("replace:",
"\t".join(["replaceN:","#TSF_carbonthe","#TSF_calender","replaceN:","0","#TSF_pokethe","help:","replaceO:","replaceN:","#TSF_replacestacks"]),
TSF_style="T")
TSF_Forth_setTSF("replaceO:",
"\t".join(["TSF_time_getdaytime()"]),
TSF_style="N")
TSF_Forth_setTSF("replaceN:",
"\t".join(["@000y@0m@0dm@wdec@0h@0n@0s"]),
TSF_style="N")
TSF_Forth_addfin(TSF_io_argvs())
TSF_Forth_argvsleftcut(TSF_io_argvs(),1)
TSF_Forth_run()

| clay584/dns-updater | dns-updater.py | Python | apache-2.0 | 6,247 | 0.024972 |

#!/usr/bin/python
# Import modules for CGI handling
import cgi
from IPy import parseAddress, IP
import re
from subprocess import Popen, PIPE
# Create instance of FieldStorage
form = cgi.FieldStorage()
# Get data from fields
ip_address = form.getvalue('ip_address')
if form.getvalue('fqdn') == None:
fqdn = 'blank'
else:
fqdn = form.getvalue('fqdn')
action = form.getvalue('action')
password = form.getvalue('password')
#ip_address = '10.0.0.1'
#fqdn = 'test21'
def is_valid_ip(ip_address):
try:
ip = parseAddress(ip_address)
except Exception:
return False
if not ip:
return False
else:
return ip[0]
def get_in_addr(ip_address):
return IP(ip_address).reverseName()
def is_valid_fqdn(fqdn):
try:
if len(fqdn) > 255:
return False
except:
return False
if fqdn[-1] == ".":
fqdn = fqdn[:-1]
allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in fqdn.split("."))
def is_valid_action(action):
if action == 'add' or action == 'delete':
return True
else:
return False
def is_valid_password(password):
if password == 'Gt500gelf':
return True
else:
return False
def is_form_valid(ip_address, fqdn, action, password):
valid_fqdn = is_valid_fqdn(fqdn)
valid_ip = is_valid_ip(ip_address)
valid_action = is_valid_action(action)
valid_password = is_valid_password(password)
if valid_fqdn and valid_ip and valid_action and valid_password:
return True
else:
return False
def get_existing_ptr_record(ip_address):
proc_exist = Popen([r'c:\windows\system32\nslookup.exe', ip_address, 'dc1.presidiolab.local'], stdout=PIPE)
for i in proc_exist.stdout:
reg_match = re.match("Name:\s+(\S+)", str(i), re.IGNORECASE)
try:
return reg_match.group(1)
except:
pass
def add_a_ptr_record(fqdn, ip_address):
proc_a_ptr = Popen(['c:\windows\system32\dnscmd.exe', 'dc1.presidiolab.local', '/RecordAdd', 'presidiolab.local', fqdn.partition('.')[0].rstrip(), '/CreatePTR', 'A', ip_address], shell=True, stdout=PIPE)
return proc_a_ptr.stdout
def delete_ptr_record(ip_address):
in_addr = get_in_addr(ip_address)
proc_ptr = Popen(['c:\windows\system32\dnscmd.exe', 'dc1.presidiolab.local', '/RecordDelete', in_addr.split('.',1)[1], in_addr.split('.',1)[0], 'PTR', '/f'], shell=True, stdout=PIPE)
return proc_ptr.stdout
def delete_a_record(fqdn, ip_address):
proc_a = Popen(['c:\windows\system32\dnscmd.exe', 'dc1.presidiolab.local', '/RecordDelete', 'presidiolab.local', fqdn.partition('.')[0], 'A', ip_address, '/f'], shell=True, stdout=PIPE)
return proc_a.stdout
def print_blank_html_form():
print_html_header()
def print_html_header():
print 'Content-type:text/html\r\n\r\n'
print '<html>'
print '<head>'
print ' <title>DNS Updater</title>'
print '</head>'
def print_html_form(valid_form, del_ptr_output, delete_a_output, add_a_ptr_output):
print ' <body>'
print '<samp>'
print '<table align=left border=0 cellpadding=1 cellspacing=1 style=width:100%'
print ' <table align=center border=0 cellpadding=1 cellspacing=1 style=width:100%>'
print ' <tbody>'
print ' <form action=./web-update-dns.py method=post>'
print ' <tr>'
print ' <td align="center">Hostname: <input placeholder=hostname type=text name=fqdn></td>'
print ' </tr>'
print ' <tr>'
print ' <td align="center">IP Address: <input placeholder=10.0.0.1 type=text name=ip_address></td>'
print ' </tr>'
print ' <tr>'
print ' <td align="center">Update Password: <input type=password name=passwo
|
rd></td>'
print ' </tr>'
print ' <tr>'
print ' <td align="center" width="500"><input type=radio name=action value=delete> Delete <input type=radio name=action value=add /> Update</td>'
print ' </tr>'
print ' <td align="center">'
if valid_form:
print 'DNS Record Updated Successfully!</td></tr>'
else:
print 'Please enter a valid IP address, hostname, action, and update password.</td></tr>'
print ' <tr>'
print ' <td align="center"><input type=submit value=Submit></td>'
print ' </tr></table><br>'
print ' <table align=left border=0 cellpadding=1 cellspacing=1 style=width:100%><tr><td align="center" width="1000">'
try:
for i in del_ptr_output:
print i
except:
pass
print ' </td></tr>'
print ' <tr><td align="center" width="1000">'
try:
for i in delete_a_output:
print i
except:
pass
print ' </td></tr>'
print ' <tr><td align="center" width="1000">'
try:
for i in add_a_ptr_output:
print i
except:
pass
print ' </td></tr>'
print ' </form>'
print ' </tbody>'
print ' </table>'
print '</table>'
print '</samp>'
def print_html_footer():
print ' </body>'
print '</html>'
valid_form = is_form_valid(ip_address, fqdn, action, password)
del_ptr_output = []
delete_a_output = []
add_a_ptr_output = []
if valid_form:
if action == 'add':
cont = 1
while cont == 1:
existing_record = get_existing_ptr_record(ip_address)
if existing_record:
delete_a_output = delete_a_record(existing_record, ip_address)
del_ptr_output = delete_ptr_record(ip_address)
del existing_record
#cont = 0
else:
if fqdn == 'blank':
pass
cont = 0
else:
add_a_ptr_output = add_a_ptr_record(fqdn, ip_address)
cont = 0
elif action == 'delete':
cont = 1
while cont == 1:
existing_record = get_existing_ptr_record(ip_address)
if existing_record:
delete_a_output = delete_a_record(existing_record, ip_address)
del_ptr_output = delete_ptr_record(ip_address)
del existing_record
#cont = 0
else:
cont = 0
pass
print_html_header()
print_html_form(valid_form, del_ptr_output, delete_a_output, add_a_ptr_output)
print_html_footer()
else:
del_ptr_output = []
delete_a_output = []
add_a_ptr_output = []
print_html_header()
print_html_form(valid_form, del_ptr_output, delete_a_output, add_a_ptr_output)
print_html_footer()
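A few spot checks of the validators defined above (the FQDN regex allows labels of 1-63 characters with no leading or trailing hyphen):

```python
# Sketch: expected accept/reject behaviour of the form validators.
assert is_valid_fqdn("host01.example.com")
assert not is_valid_fqdn("-bad-.example.com")
assert is_valid_action("add") and not is_valid_action("drop")
```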

| Mo-Talha/Nomad | data/search/elastic.py | Python | mit | 7,634 | 0.001703 |

from datetime import datetime
import mongoengine
import elasticsearch
from elasticsearch import helpers
from models.employer import Employer
import models.term as Term
import shared.secrets as secrets
import shared.logger as logger
COMPONENT = 'Search'
elastic_instance = elasticsearch.Elasticsearch()
def index_employer_waterlooworks(employer):
employer_document = {
"_index": "waterlooworks",
"_type": "employers",
"_id": employer.name,
"_source": {
"employer_name": employer.name,
"employer_jobs": [str(job.id) for job in employer.jobs]
}
}
elastic_instance.index('waterlooworks', 'employers', employer_document, id=employer.name)
def update_employer_waterlooworks(employer):
employer_document = {
"doc": {
"employer_name": employer.name,
"employer_jobs": [str(job.id) for job in employer.jobs]
}
}
elastic_instance.update('waterlooworks', 'employers', employer.name, body=employer_document)
def delete_employer_waterlooworks(employer):
elastic_instance.delete('waterlooworks', 'employers', employer.name, ignore=[404])
def index_job_waterlooworks(employer, job):
job_document = {
"_index": "waterlooworks",
"_type": "jobs",
"_parent": employer.name,
"_id": str(job.id),
"_source": {
"employer_name": employer.name,
"job_title": job.title,
"job_year": job.year,
"job_term": job.term,
"job_summary": job.summary,
"job_keywords": [k.keyword for k in job.keywords],
"job_locations": [location.name for location in job.location],
"job_programs": job.programs,
"job_levels": job.levels
}
}
elastic_instance.index('waterlooworks', 'jobs', job_document, id=str(job.id), parent=employer.name)
def update_job_waterlooworks(employer, job):
job_document = {
"doc": {
"employer_name": employer.name,
"job_title": job.title,
"job_year": job.year,
"job_term": job.term,
"job_summary": job.summary,
"job_keywords": [k.keyword for k in job.keywords],
"job_locations": [location.name for location in job.location],
"job_programs": job.programs,
"job_levels": job.levels
}
}
elastic_instance.update('waterlooworks', 'jobs', str(job.id), body=job_document, parent=employer.name)
def delete_job_waterlooworks(employer, job):
elastic_instance.delete('waterlooworks', 'jobs', str(job.id), parent=employer.name, ignore=[404])  # doc_type 'jobs' to match the mapping used elsewhere
def index_waterlooworks():
logger.info(COMPONENT, 'Indexing waterlooworks data')
elastic_instance.indices.delete(index='waterlooworks', ignore=[404])
elastic_instance.indices.create('waterlooworks', body={
"mappings": {
"employers": {
"properties": {
"employer_name": {"type": "string"},
"employer_jobs": {"type": "string"}
}
},
"jobs": {
"_parent": {
"type": "employers"
},
"properties": {
"job_title": {"type": "string"},
"job_year": {"type": "integer"},
"job_term": {"type": "string"},
"job_summary": {"type": "string"},
"job_locations": {"type": "string"},
"job_programs": {"type": "string"},
"job_levels": {"type": "string"}
}
}
}
})
logger.info(COMPONENT, 'Indexing waterlooworks employers and jobs')
employers = []
jobs = []
for employer in Employer.objects.only('name', 'jobs'):
logger.info(COMPONENT, 'Indexing employer: {}'.format(employer.name))
employer_document = {
"_index": "waterlooworks",
"_type": "employers",
"_id": employer.name,
"_source": {
"employer_name": employer.name,
"employer_jobs": [str(job.id) for job in employer.jobs]
}
}
employers.append(employer_document)
for job in employer.jobs:
if not job.deprecated:
logger.info(COMPONENT, 'Indexing job: {} for employer: {}'.format(job.title, employer.name))
job_document = {
"_ind
|
ex": "waterlooworks",
"_type": "jobs",
"_parent": employer.name,
"_id": str(job.id),
"_source": {
"employer_name": employer.name,
"job_title": job.title,
"job_year": job.year,
"job_term": job.term,
"job_summary": job.summary,
"job_keywords": [k.keyword for k in job.keywords],
"job_locations": [location.name for location in job.location],
"job_programs": job.programs,
"job_levels": job.levels
}
}
jobs.append(job_document)
if len(jobs) == 1000:
helpers.bulk(elastic_instance, jobs)
jobs = []
if len(employers) == 1000:
helpers.bulk(elastic_instance, employers)
employers = []
if len(employers) > 0:
helpers.bulk(elastic_instance, employers)
if len(jobs) > 0:
helpers.bulk(elastic_instance, jobs)
def query_jobs_and_employers(query, page):
start_page = 10 * (int(page) - 1)
now = datetime.now()
response = elastic_instance.search(index='waterlooworks', doc_type=['jobs'], body={
"from": start_page, "size": 10,
"sort": [
{"job_year": "desc"},
"_score"
],
"query": {
"bool": {
"should": [
{
"match": {
"job_term": Term.get_term(now.month)
}
}
],
"must": {
"multi_match": {
"query": query,
"type": "cross_fields",
"fields": ["employer_name^4", "job_title^4", "job_term"]
}
}
}
}
})
return response
def query_jobs(query, page):
start_page = 10 * (int(page) - 1)
now = datetime.now()
body = {
"from": start_page, "size": 10,
"sort": [
{"job_year": "desc"},
"_score"
],
"query": {
"bool": {
"should": [
{
"match": {
"job_term": Term.get_term(now.month)
}
}
]
}
}
}
if query:
body['query']['bool']['must'] = {
"multi_match": {
"query": query,
"type": "cross_fields",
"fields": ["job_title^4", "job_keywords^4", "job_summary^3", "job_term"]
}
}
response = elastic_instance.search(index='waterlooworks', doc_type=['jobs'], body=body)
return response
if __name__ == "__main__":
mongoengine.connect(secrets.MONGO_DATABASE, host=secrets.MONGO_HOST, port=secrets.MONGO_PORT)
index_waterlooworks()
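Hypothetical usage of the search helper above, assuming a reachable Elasticsearch node and a populated waterlooworks index:

```python
# Page through job hits; the response uses the standard search body shape.
response = query_jobs("software developer", page=1)
for hit in response["hits"]["hits"]:
    print(hit["_source"]["job_title"])
```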

| nathanielvarona/airflow | airflow/migrations/versions/64a7d6477aae_fix_description_field_in_connection_to_.py | Python | apache-2.0 | 2,586 | 0.000387 |

#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""fix description field in connection to be text
Revision ID: 64a7d6477aae
Revises: f5b5ec089444
Create Date: 2020-11-25 08:56:11.866607
"""
import sqlalchemy as sa # noqa
from alembic import op # noqa
# revision identifiers, used by Alembic.
revision = '64a7d6477aae'
down_revision = '61ec73d9401f'
branch_labels = None
depends_on = None
def upgrade():
"""Apply fix description field in connection to be text"""
conn = op.get_bind() # pylint: disable=no-member
if conn.dialect.name == "sqlite":
# in sqlite TEXT and STRING column types are the same
return
if conn.dialect.name == "mysql":
op.alter_column(
'connection',
'description',
existing_type=sa.String(length=5000),
type_=sa.Text(length=5000),
existing_nullable=True,
)
else:
# postgres does not allow size modifier for text type
op.alter_column('connection', 'description', existing_type=sa.String(length=5000), type_=sa.Text())
def downgrade():
"""Unapply fix description field in connection to be text"""
conn = op.get_bind() # pylint: disable=no-member
if conn.dialect.name == "sqlite":
# in sqlite TEXT and STRING column types are the same
return
if conn.dialect.name == "mysql":
op.alter_column(
'connection',
'description',
existing_type=sa.Text(5000),
type_=sa.String(length=5000),
existing_nullable=True,
)
else:
# postgres does not allow size modifier for text type
op.alter_column(
'connection',
'description',
existing_type=sa.Text(),
type_=sa.String(length=5000),
existing_nullable=True,
)
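The dialect split above exists because MySQL requires a length on TEXT while Postgres rejects one; a minimal sketch of the same choice (dialect_name is a stand-in variable, not part of the migration):

```python
# Hypothetical: pick the column type the way upgrade() does.
import sqlalchemy as sa

def text_type_for(dialect_name):
    # MySQL: TEXT(5000); others (e.g. Postgres): plain TEXT, no size modifier.
    return sa.Text(length=5000) if dialect_name == "mysql" else sa.Text()
```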

| sparkslabs/kamaelia_ | Sketches/RJL/Kamaelia/Community/RJL/Kamaelia/Protocol/HTTP/ErrorPages.py | Python | apache-2.0 | 3,275 | 0.011298 |

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
========================
websiteErrorPage
========================
A simple HTTP request handler for HTTPServer.
websiteErrorPage generates basic HTML error pages for an HTTP server.
"""
from Axon.Component import component
from Axon.Ipc import producerFinished  # used by websiteErrorPage.main below
def getErrorPage(errorcode, msg = ""):
if errorcode == 400:
return {
"statuscode" : "400",
"data" : u"<html>\n<title>400 Bad Request</title>\n<body style='background-color: black; color: white;'>\n<h2>400 Bad Request</h2>\n<p>" + msg + "</p></body>\n</html>\n\n",
"type" : "text/html",
}
elif errorcode == 404:
return {
"statuscode" : "404",
"data" : u"<html>\n<title>404 Not Found</title>\n<body style='background-color: black; color: white;'>\n<h2>404 Not Found</h2>\n<p>" + msg + u"</p></body>\n</html>\n\n",
"type" : "text/html"
}
elif errorcode == 500:
return {
"statuscode" : "500",
"data" : u"<html>\n<title>500 Internal Server Error</title>\n<body style='background-color: black; color: white;'>\n<h2>500 Internal Server Error</h2>\n<p>" + msg + u"</p></body>\n</html>\n\n",
"type" : "text/html"
}
elif errorcode == 501:
return {
"statuscode" : "501",
"data" : u"<html>\n<title>501 Not Implemented</title>\n<body style='background-color: black; color: white;'>\n<h2>501 Not Implemented</h2>\n<p>" + msg + u"</p></body>\n</html>\n\n",
"type" : "text/html"
}
else:
return {
"statuscode" : str(errorcode),
"data" : u"",
"type" : "text/html"
}
class websiteErrorPage(component):
def __init__(self, errorcode, msg = ""):
super(websiteErrorPage, self).__init__()
self.errorcode = errorcode
self.msg = msg
def main(self):
resource = getErrorPage(self.errorcode, self.msg) # get the error page
resource["incomplete"] = False # mark its data as being complete (i.e. no more chunks to come)
self.send(resource, "outbox") # send it on to HTTPRequestHandler
self.send(producerFinished(self), "signal") # and signal that this component has terminated
__kamaelia_components__ = ( websiteErrorPage, )
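A quick sketch of getErrorPage as defined above:

```python
# The helper returns a dict ready to hand to HTTPRequestHandler.
page = getErrorPage(404, "No such resource")
assert page["statuscode"] == "404"
assert page["type"] == "text/html"
```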

| lindseypack/NIM | devices/migrations/0013_auto_20140925_1616.py | Python | mit | 981 | 0.004077 |

# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('devices', '0012_auto_20140925_1540'),
]
operations = [
migrations.AlterField(
model_name='ap',
name='notes',
field=models.TextField(default=b'', verbose_name=b'Notes', blank=True),
),
migrations.AlterField(
model_name='phone',
name='notes',
field=models.TextField(default=b'', verbose_name=b'Notes', blank=True),
),
migrations.AlterField(
model_name='switch',
name='notes',
field=models.TextField(default=b'', verbose_name=b'Notes', blank=True),
),
migrations.AlterField(
model_name='ups',
name='notes',
field=models.TextField(default=b'', verbose_name=b'Notes', blank=True),
),
]

| 0111001101111010/open-health-inspection-api | venv/lib/python2.7/site-packages/pymongo/common.py | Python | gpl-2.0 | 23,408 | 0.000299 |

# Copyright 2011-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Functions and classes common to multiple pymongo modules."""
import sys
import warnings
from pymongo import read_preferences
from pymongo.auth import MECHANISMS
from pymongo.read_preferences import ReadPreference
from pymongo.errors import ConfigurationError
HAS_SSL = True
try:
import ssl
except ImportError:
HAS_SSL = False
# Jython 2.7 includes an incomplete ssl module. See PYTHON-498.
if sys.platform.startswith('java'):
HAS_SSL = False
def raise_config_error(key, dummy):
"""Raise ConfigurationError with the given key name."""
raise ConfigurationError("Unknown option %s" % (key,))
def validate_boolean(option, value):
"""Validates that 'value' is 'true' or 'false'.
"""
if isinstance(value, bool):
return value
elif isinstance(value, basestring):
if value not in ('true', 'false'):
raise ConfigurationError("The value of %s must be "
"'true' or 'false'" % (option,))
return value == 'true'
raise TypeError("Wrong type for %s, value must be a boolean" % (option,))
def validate_integer(option, value):
"""Validates that 'value' is an integer (or basestring representation).
"""
if isinstance(value, (int, long)):
return value
elif isinstance(value, basestring):
if not value.isdigit():
raise ConfigurationError("The value of %s must be "
"an integer" % (option,))
return int(value)
raise TypeError("Wrong type for %s, value must be an integer" % (option,))
def validate_positive_integer(option, value):
"""Validate that 'value' is a positive integer.
"""
val = validate_integer(option, value)
if val < 0:
raise ConfigurationError("The value of %s must be "
"a positive integer" % (option,))
return val
def validate_readable(option, value):
"""Validates that 'value' is file-like and readable.
"""
# First make sure its a string py3.3 open(True, 'r') succeeds
# Used in ssl cert checking due to poor ssl module error reporting
value = validate_basestring(option, value)
open(value, 'r').close()
return value
def validate_cert_reqs(option, value):
"""Validate the cert reqs are valid. It must be None or one of the three
values ``ssl.CERT_NONE``, ``ssl.CERT_OPTIONAL`` or ``ssl.CERT_REQUIRED``"""
if value is None:
return value
if HAS_SSL:
if value in (ssl.CERT_NONE, ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED):
return value
raise ConfigurationError("The value of %s must be one of: "
"`ssl.CERT_NONE`, `ssl.CERT_OPTIONAL` or "
"`ssl.CERT_REQUIRED" % (option,))
else:
raise ConfigurationError("The value of %s is set but can't be "
"validated. The ssl module is not available"
% (option,))
def validate_positive_integer_or_none(option, value):
"""Validate that 'value' is a positive integer or None.
"""
if value is None:
return value
return validate_positive_integer(option, value)
def validate_basestring(option, value):
"""Validates that 'value' is an instance of `basestring`.
"""
if isinstance(value, basestring):
return value
raise TypeError("Wrong type for %s, value must be an "
"instance of %s" % (option, basestring.__name__))
def validate_int_or_basestring(option, value):
"""Validates that 'value' is an integer or string.
"""
if isinstance(value, (int, long)):
return value
elif isinstance(value, basestring):
if value.isdigit():
return int(value)
return value
raise TypeError("Wrong type for %s, value must be an "
"integer or a string" % (option,))
def validate_positive_float(option, value):
"""Validates that 'value' is a float, or can be converted to one, and is
positive.
"""
err = ConfigurationError("%s must be a positive int or float" % (option,))
try:
value = float(value)
except (ValueError, TypeError):
raise err
# float('inf') doesn't work in 2.4 or 2.5 on Windows, so just cap floats at
# one billion - this is a reasonable approximation for infinity
if not 0 < value < 1e9:
raise err
return value
def validate_timeout_or_none(option, value):
"""Validates a timeout specified in milliseconds returning
a value in floating point seconds.
"""
if value is None:
return value
return validate_positive_float(option, value) / 1000.0
def validate_read_preference(dummy, value):
"""Validate read preference for a ReplicaSetConnection.
"""
if value in read_preferences.modes:
return value
# Also allow string form of enum for uri_parser
try:
return read_preferences.mongos_enum(value)
except ValueError:
raise ConfigurationError("Not a valid read preference")
def validate_tag_sets(dummy, value):
"""Validate tag sets for a ReplicaSetConnection.
"""
if value is None:
return [{}]
if not isinstance(value, list):
raise ConfigurationError((
"Tag sets %s invalid, must be a list" ) % repr(value))
if len(value) == 0:
raise ConfigurationError((
"Tag sets %s invalid, must be None or contain at least one set of"
" tags") % repr(value))
for tags in value:
if not isinstance(tags, dict):
raise ConfigurationError(
"Tag set %s invalid, must be a dict" % repr(tags))
return value
def validate_auth_mechanism(option, value):
"""Validate the authMechanism URI option.
"""
if value not in MECHANISMS:
raise ConfigurationError("%s must be in "
"%s" % (option, MECHANISMS))
return value
# journal is an alias for j,
# wtimeoutms is an alias for wtimeout
VALIDATORS = {
'replicaset': validate_basestring,
'slaveok': validate_boolean,
'slave_okay': validate_boolean,
'safe': validate_boolean,
'w': validate_int_or_basestring,
'wtimeout': validate_integer,
'wtimeoutms': validate_integer,
'fsync': validate_boolean,
'j': validate_boolean,
'journal': validate_boolean,
'connecttimeoutms': validate_timeout_or_none,
'sockettimeoutms': validate_timeout_or_none,
'waitqueuetimeoutms': validate_timeout_or_none,
'waitqueuemultiple': validate_positive_integer_or_none,
'ssl': validate_boolean,
'ssl_keyfile': validate_readable,
'ssl_certfile': validate_readable,
'ssl_cert_reqs': validate_cert_reqs,
'ssl_ca_certs': validate_readable,
'readpreference': validate_read_preference,
'read_preference': validate_read_preference,
'tag_sets': validate_tag_sets,
'secondaryacceptablelatencyms': validate_positive_float,
'secondary_acceptable_latency_ms': validate_positive_float,
'auto_start_request': validate_boolean,
'use_greenlets': validate_boolean,
'authmechanism': validate_auth_mechanism,
'authsource': validate_basestring,
'gssapiservicename': validate_basestring,
}
_AUTH_OPTIONS = frozenset(['gssapiservicename'])
def validate_auth_option(option, value):
"""Validate optional authentication parameters.
"""
lower, value = validate(option, value)
if lower not in _AUTH_OPTIONS:
raise ConfigurationError('Unknown '
'authentic
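A few spot checks of the option validators above (Python 2 semantics, since the module relies on basestring and long):

```python
# Sketch: typical coercions performed by the validators.
assert validate_boolean("safe", "true") is True
assert validate_positive_integer("wtimeout", "30") == 30
assert validate_timeout_or_none("sockettimeoutms", 2500) == 2.5
```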

| endlessm/chromium-browser | tools/style_variable_generator/PRESUBMIT.py | Python | bsd-3-clause | 746 | 0 |

# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting tools/style_variable_generator/
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
WHITELIST = [r'.+_test.py$']
def CheckChangeOnUpload(input_api, output_api):
return input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, '.', whitelist=WHITELIST)
def CheckChangeOnCommit(input_api, output_api):
return input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api, '.', whitelist=WHITELIST)

| bdzimmer/handwriting | handwriting/improc.py | Python | bsd-3-clause | 7,094 | 0.000846 |

# -*- coding: utf-8 -*-
"""
Image processing and feature extraction functions.
"""
import cv2
import numpy as np
def pad_image(im, width, height, border=255):
"""pad char image in a larger image"""
xoff = abs(int((im.shape[1] - width) / 2))
yoff = abs(int((im.shape[0] - height) / 2))
if width >= im.shape[1]:
x_min_old = 0
x_max_old = im.shape[1]
x_min_new = xoff
x_max_new = im.shape[1] + xoff
else:
x_min_old = xoff
x_max_old = width + xoff
x_min_new = 0
x_max_new = width
if height >= im.shape[0]:
y_min_old = 0
y_max_old = im.shape[0]
y_min_new = yoff
y_max_new = im.shape[0] + yoff
else:
y_min_old = yoff
y_max_old = height + yoff
y_min_new = 0
y_max_new = height
image_subset = im[y_min_old:y_max_old, x_min_old:x_max_old]
new_bmp = np.ones((height, width, 3), dtype=np.uint8) * border
new_bmp[y_min_new:y_max_new, x_min_new:x_max_new] = image_subset
return new_bmp
def transform_random(image, trans_size, rot_size, scale_size):
"""apply a small random transformation to an image"""
# TODO: make ranges of random numbers input parameters
trans = (np.random.rand(2) - 0.5) * np.array(trans_size)
rot = (np.random.rand(4) - 0.5) * rot_size
scale = 1.0 + scale_size * (np.random.rand(1)[0] - 0.5)
x_size = image.shape[1]
y_size = image.shape[0]
trans_to_center = np.float32(
[[1, 0, -x_size / 2.0],
[0, 1, -y_size / 2.0],
[0, 0, 1]])
trans_from_center = np.float32(
[[1, 0, x_size / 2.0],
[0, 1, y_size / 2.0],
[0, 0, 1]])
trans_random = np.float32(
[[1 + rot[0], 0 + rot[1], trans[0]],
[0 + rot[2], 1 + rot[3], trans[1]],
[0, 0, 1]])
trans_scale = np.identity(3, dtype=np.float32) * scale
tmat = np.dot(trans_from_center, np.dot(trans_scale, np.dot(trans_random, trans_to_center)))[0:2, :]
image_new = cv2.warpAffine(
image, tmat,
(image.shape[1], image.shape[0]),
borderValue=(255, 255, 255))
# cv2.imshow("image", image)
# cv2.imshow("new_image", image_new)
# cv2.waitKey()
return image_new
def filter_cc(image):
"""find connected components in a threshold image and white out
everything except the second largest"""
# TODO: better way to select relevant components
comp_filt = np.copy(image)
gray = 255 - np.array(np.sum(image, axis=2) / 3.0, dtype=np.uint8)
_, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
connectivity = 4
comps = cv2.connectedComponentsWithStats(thresh, connectivity, cv2.CV_32S)
labels = comps[1]
sizes = comps[2][:, cv2.CC_STAT_AREA]
# get index of second-largest component
if len(sizes) > 1:
second_largest_idx = np.argsort(sizes)[-2]
else:
second_largest_idx = np.argsort(sizes)[-1]
# eliminate everything else
for label_idx in range(len(sizes)):
if label_idx != second_largest_idx:
comp_filt[labels == label_idx] = 255
# cv2.imshow("image", image)
# cv2.imshow("gray", gray)
# cv2.imshow("thresh", thresh)
# cv2.imshow("comp_filt", comp_filt)
# cv2.waitKey()
return comp_filt
def align(image, x_align=True, y_align=True):
"""shift an image so the center of mass of the pixels is centered"""
# TODO: this should just operate on grayscale
gray = 255 - np.array(np.sum(image, axis=2) / 3.0, dtype=np.uint8)
if x_align:
x_size = image.shape[1]
x_mean = np.sum(np.sum(gray, axis=0) * np.arange(x_size)) / np.sum(gray)
x_shift = x_size / 2.0 - x_mean
else:
x_shift = 0.0
if y_align:
y_size = image.shape[0]
y_mean = np.sum(np.sum(gray, axis=1) * np.arange(y_size)) / np.sum(gray)
y_shift = y_size / 2.0 - y_mean
else:
y_shift = 0.0
tmat = np.float32(
[[1, 0, x_shift],
[0, 1, y_shift]])
new_image = cv2.warpAffine(
image, tmat, (image.shape[1], image.shape[0]), borderValue=(255, 255, 255))
# cv2.imshow("image", image)
# cv2.imshow("new_image", new_image)
# cv2.waitKey()
return new_image
def grayscale(image):
"""convert RGB ubyte image to grayscale"""
return np.sum(image, axis=2) / 3.0
def downsample(image, scale_factor):
"""downsample an image and unravel to create a feature vector"""
feats = cv2.resize(
image,
(int(image.shape[0] * scale_factor),
int(image.shape[1] * scale_factor)))
return feats
def downsample_4(image):
"""create a
|
feature vector from four downsampling amounts"""
return downsample_multi(image, [0.4, 0.2, 0.1, 0.05])
def downsample_multi(image, scales):
"""create a feature vector from arbitrary downsampling amounts"""
return np.hstack([np.ravel(downsample(image, x)) for x in scales])
def max_pool(im):
"""perform 2x2 max pooling"""
return np.max(
np.stack(
(im[0::2, 0::2],
im[0::2, 1::2],
im[1::2, 0::2],
im[1::2, 1::2]),
axis=-1),
axis=-1)
def max_pool_multi(image, ns):
"""perform multiple levels of max pooling and unravel
to create a feature vector"""
# TODO: move this to a higher level
# image_gray = _grayscale(image)
if 1 in ns:
res = [image]
else:
res = []
for n in range(2, max(ns) + 1):
image = max_pool(image)
if n in ns:
res.append(image)
return np.hstack([np.ravel(y) for y in res])
def column_ex(gray):
"""experimental feature - something like the center of mass of
overlapping columns of the image"""
width = 2
# mul_mat = np.arange(y_size)[:, np.newaxis]
# for some reason, it works a lot better to not divide by the sum of the
# whole window but only the first column.
mul_mat = np.linspace(0, 1, gray.shape[0])[:, np.newaxis]
y_agg = np.array([(np.sum(gray[:, idx + width] * mul_mat) /
np.sum(gray[:, idx]))
for idx in range(gray.shape[1] - width)])
y_agg[~np.isfinite(y_agg)] = 0.0
res = np.hstack((y_agg, np.diff(y_agg)))
return res
def extract_pos(pos, im, border=255):
"""extract a position (tuple of start and end) from an image"""
# this is intended to have the correct logic to always return an image
# of the width of the position even if it is off the edge of the image
target_width = pos[1] - pos[0]
extract = im[:, np.maximum(pos[0], 0):pos[1]]
# print(cpos, extract.shape, im.shape)
if extract.shape[1] < target_width:
res = np.ones((im.shape[0], target_width, 3), dtype=np.ubyte) * border
if pos[0] < 0:
pr = (-pos[0], -pos[0] + extract.shape[1])
else:
pr = (0, extract.shape[1])
# print(pr, flush=True)
res[:, pr[0]:pr[1]] = extract
return res
else:
res = extract
return res
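A small sketch of pad_image above, centring a smaller image on a white canvas:

```python
# Pad a 20x10 black RGB image into a 64x64 canvas (border defaults to 255).
import numpy as np

im = np.zeros((10, 20, 3), dtype=np.uint8)
padded = pad_image(im, 64, 64)
assert padded.shape == (64, 64, 3)
assert padded[0, 0].tolist() == [255, 255, 255]  # corners are border colour
```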

| kave/cfgov-refresh | cfgov/v1/admin.py | Python | cc0-1.0 | 143 | 0.013986 |

from django.contrib import admin
from models.snippets import Contact
@admin.register(Contact)
class ContactAdmin(admin.ModelAdmin):
pass

| NERC-CEH/jules-jasmin | majic/joj/services/dataset.py | Python | gpl-2.0 | 13,566 | 0.003096 |

"""
# Majic
# Copyright (C) 2014 CEH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from sqlalchemy import or_
from sqlalchemy.orm import joinedload, contains_eager, subqueryload
from joj.model import Dataset, DatasetType, DrivingDataset, DrivingDatasetParameterValue, Parameter, \
DrivingDatasetLocation
from joj.services.general import DatabaseService
from joj.model.non_database.spatial_extent import SpatialExtent
from joj.model.non_database.temporal_extent import TemporalExtent
from joj.utils import constants
from joj.model.non_database.driving_dataset_jules_params import DrivingDatasetJulesParams
class DatasetService(DatabaseService):
"""Encapsulates operations on Map datasets"""
def get_datasets_for_user(self, user_id, dataset_type=None, dataset_type_id=None):
"""
Returns a list of datasets that the supplied user has access to,
and is of a particular type. This can be specified as either an ID
or a name, depending on which is more convenient
:param user_id: ID of the user to get a list of datasets for
:param dataset_type: String name of the dataset type
:param dataset_type_id: ID of the dataset type
"""
with self.readonly_scope() as session:
# Find all datasets that are viewable by this user (private)
# or are public (null viewable_by)
# Note SQLAlchemy wants '== None' not 'is None'
if dataset_type_id is None and dataset_type is None:
return session.query(DatasetType).join(DatasetType.datasets) \
.options(contains_eager(DatasetType.datasets)) \
.filter(or_(Dataset.viewable_by_user_id == user_id,
Dataset.viewable_by_user_id == None), Dataset.deleted == False).all()
elif dataset_type_id is None:
return session.query(Dataset).join(DatasetType).filter(DatasetType.type == dataset_type,
or_(Dataset.viewable_by_user_id == user_id,
Dataset.viewable_by_user_id == None),
Dataset.deleted == False).all()
else:
                return session.query(Dataset).filter(Dataset.dataset_type_id == dataset_type_id,
                                                     or_(Dataset.viewable_by_user_id == user_id,
                                                         Dataset.viewable_by_user_id == None),
                                                     Dataset.deleted == False).all()
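    # Usage sketch (hypothetical IDs): service.get_datasets_for_user(1, dataset_type='Coverage')
    # returns the public datasets of that type plus those private to user 1;
    # '== None' works because SQLAlchemy overloads '==' into an SQL 'IS NULL' clause.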
def get_dataset_types(self):
"""Returns all of the dataset types in the system"""
with self.readonly_scope() as session:
return session.query(DatasetType).all()
def get_dataset_by_id(self, dataset_id, user_id=None):
""" Returns a single dataset with the given ID
:param dataset_id: ID of the dataset to look for
:param user_id: Optional user ID to match
"""
with self.readonly_scope() as session:
dataset = session.query(Dataset) \
.options(joinedload(Dataset.dataset_type)) \
.filter(Dataset.id == dataset_id,
or_(Dataset.viewable_by_user_id == user_id,
Dataset.viewable_by_user_id == None)).one()
if dataset.data_range_to < dataset.data_range_from:
tmp = dataset.data_range_from
dataset.data_range_from = dataset.data_range_to
dataset.data_range_to = tmp
return dataset
def get_all_datasets(self):
"""
Returns a list of all active datasets in EcoMaps
"""
with self.readonly_scope() as session:
return session.query(Dataset) \
.options(joinedload(Dataset.dataset_type)) \
.filter(Dataset.deleted == False) \
.all()
def create_coverage_dataset(self, name, wms_url, netcdf_url, low_res_url,
data_range_from, data_range_to, is_categorical):
"""
Creates a coverage dataset in the EcoMaps DB
@param name: Display name of the dataset
@param wms_url: Endpoint for the mapping server
@param netcdf_url: URL of the OpenDAP endpoint for this dataset
@param low_res_url: URL for accessing the NetCDF file over the HTTP protocol
@param data_range_from: Low range for the data
@param data_range_to: High range for the data
@param is_categorical: Set to true if the data is categorical (not continuous)
"""
with self.transaction_scope() as session:
dataset_type = session.query(DatasetType).filter(DatasetType.type == 'Coverage').one()
dataset = Dataset()
dataset.name = name
dataset.dataset_type = dataset_type
dataset.netcdf_url = netcdf_url
dataset.wms_url = wms_url
dataset.low_res_url = low_res_url
dataset.data_range_from = data_range_from
dataset.data_range_to = data_range_to
dataset.is_categorical = is_categorical
session.add(dataset)
def create_point_dataset(self, name, wms_url, netcdf_url):
"""
Creates a point dataset in the EcoMaps DB
@param name: Display name of the dataset
@param wms_url: Endpoint for the mapping server
@param netcdf_url: URL of the OpenDAP endpoint for this dataset
"""
with self.transaction_scope() as session:
dataset_type = session.query(DatasetType).filter(DatasetType.type == 'Point').one()
dataset = Dataset()
dataset.name = name
dataset.dataset_type = dataset_type
dataset.netcdf_url = netcdf_url
dataset.wms_url = wms_url
dataset.low_res_url = None
session.add(dataset)
def delete(self, id, user_id):
"""
Soft-deletes a dataset to remove it from active lists
@param id: ID of dataset to delete
@param user_id: ID of the user attempting the delete operation
"""
# First let's make sure the user specified can access the dataset
ds = self.get_dataset_by_id(id, user_id)
if ds:
with self.transaction_scope() as session:
dataset = session.query(Dataset).get(id)
dataset.deleted = True
session.add(dataset)
def update(self, id, data_range_from, data_range_to, is_categorical):
"""
Updates basic properties on the dataset specified
@param id: ID of the dataset to update
@param data_range_from: Low range of data
@param data_range_to: High range of data
@param is_categorical: Set to true for non-continuous data
"""
with self.transaction_scope() as session:
dataset = session.query(Dataset).get(id)
dataset.data_range_from = data_range_from
dataset.data_range_to = data_range_to
dataset.is_categorical = is_categorical
session.add(dataset)
def get_driving_datasets(self, user):
"""
        Returns a list of available driving datasets
If you are an admin this is all o
|
lem9/weblate
|
weblate/accounts/forms.py
|
Python
|
gpl-3.0
| 20,803
| 0
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2017 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
import re
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Fieldset, HTML
from django import forms
from django.conf import settings
from django.utils.html import escape
from django.utils.translation import ugettext_lazy as _, pgettext
from django.contrib.auth import authenticate, password_validation
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import SetPasswordForm as DjangoSetPasswordForm
from django.contrib.auth.models import User
from django.core.validators import validate_email
from django.db.models import Q
from django.forms.widgets import EmailInput
from django.middleware.csrf import rotate_token
from django.utils.encoding import force_text
from weblate.accounts.auth import try_get_user
from weblate.accounts.models import Profile, get_all_user_mails
from weblate.accounts.captcha import MathCaptcha
from weblate.accounts.notifications import notify_account_activity
from weblate.accounts.pipeline import USERNAME_RE
from weblate.accounts.ratelimit import reset_rate_limit, check_rate_limit
from weblate.lang.models import Language
from weblate.trans.models import Project
from weblate.trans.util import sort_choices
from weblate.utils import messages
from weblate.utils.validators import validate_fullname
from weblate.logger import LOGGER
# Reject some suspicious email addresses, based on checks enforced by Exim MTA
EMAIL_BLACKLIST = re.compile(r'^([./|]|.*([@%!`#&?]|/\.\./))')
class UniqueEmailMixin(object):
validate_unique_mail = False
def clean_email(self):
"""Validate that the supplied email address is unique for the site. """
self.cleaned_data['email_user'] = None
mail = self.cleaned_data['email']
users = User.objects.filter(
Q(social_auth__verifiedemail__email__iexact=mail) |
Q(email__iexact=mail)
)
if users.exists():
self.cleaned_data['email_user'] = users[0]
if self.validate_unique_mail:
raise forms.ValidationError(
_(
"This email address is already in use. "
"Please supply a different email address."
)
)
return self.cleaned_data['email']
class PasswordField(forms.CharField):
"""Password field."""
def __init__(self, *args, **kwargs):
kwargs['widget'] = forms.PasswordInput(render_value=False)
kwargs['max_length'] = 256
super(PasswordField, self).__init__(*args, **kwargs)
class EmailField(forms.CharField):
"""Slightly restricted EmailField.
We blacklist some additional local parts."""
widget = EmailInput
default_validators = [validate_email]
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 254
super(EmailField, self).__init__(*args, **kwargs)
def clean(self, value):
value = super(EmailField, self).clean(value)
user_part = value.rsplit('@', 1)[0]
if EMAIL_BLACKLIST.match(user_part):
raise forms.ValidationError(_('Enter a valid email address.'))
if re.match(settings.REGISTRATION_EMAIL_MATCH, value):
return value
raise forms.ValidationError(_('This email address is not allowed.'))
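    # Sketch of what EMAIL_BLACKLIST rejects (local part only, before the '@'):
    # '.user', '|user' and anything containing '%' or '/../' all match and
    # fail validation, while a plain 'john.doe' passes through.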
class UsernameField(forms.RegexField):
def __init__(self, *args, **kwargs):
help_text = _(
'Username may only contain letters, '
'numbers or the following characters: @ . + - _'
)
kwargs['max_length'] = 30
kwargs['regex'] = USERNAME_RE
kwargs['help_text'] = help_text
kwargs['label'] = _('Username')
kwargs['error_messages'] = {
'invalid': help_text,
}
kwargs['required'] = True
self.valid = None
super(UsernameField, self).__init__(*args, **kwargs)
def clean(self, value):
"""Username validation, requires unique name."""
if value is None:
return
if value.startswith('.'):
raise forms.ValidationError(
_('Username can not start with full stop.')
)
if value is not None:
existing = User.objects.filter(username=value)
if existing.exists() and value != self.valid:
raise forms.ValidationError(
_(
'This username is already taken. '
'Please choose another.'
)
)
return super(UsernameField, self).clean(value)
class FullNameField(forms.CharField):
default_validators = [validate_fullname]
def __init__(self, *args, **kwargs):
# The Django User model limit is 30 chars, this should
# be raised if we switch to custom User model
kwargs['max_length'] = 30
kwargs['label'] = _('Full name')
kwargs['required'] = True
super(FullNameField, self).__init__(*args, **kwargs)
class SortedSelectMixin(object):
"""Mixin for Select widgets to sort choices alphabetically."""
def render_options(self, selected_choices):
"""Render sorted options."""
# Normalize to strings.
selected_choices = set(force_text(v) for v in selected_choices)
output = []
# Actually sort values
all_choices = sort_choices(list(self.choices))
# Stolen from Select.render_options
for option_value, option_label in all_choices:
output.append(
                self.render_option(
selected_choices, option_value, option_label
)
)
return '\n'.join(output)
class SortedSelectMultiple(SortedSelectMixin, forms.SelectMultiple):
"""Wrapper class to sort choices alphabetically."""
class SortedSelect(SortedSelectMixin, forms.Select):
"""Wrapper class to sort choices alphabetically."""
class ProfileForm(forms.ModelForm):
"""User profile editing."""
class Meta(object):
model = Profile
fields = (
'language',
'languages',
'secondary_languages',
)
widgets = {
'language': SortedSelect,
'languages': SortedSelectMultiple,
'secondary_languages': SortedSelectMultiple,
}
def __init__(self, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
# Limit languages to ones which have translation
qs = Language.objects.have_translation()
self.fields['languages'].queryset = qs
self.fields['secondary_languages'].queryset = qs
class SubscriptionForm(forms.ModelForm):
"""User subscription management."""
class Meta(object):
model = Profile
fields = (
'subscriptions',
)
widgets = {
'subscriptions': forms.SelectMultiple
}
def __init__(self, *args, **kwargs):
super(SubscriptionForm, self).__init__(*args, **kwargs)
user = kwargs['instance'].user
self.fields['subscriptions'].required = False
self.fields['subscriptions'].queryset = Project.objects.all_acl(user)
class SubscriptionSettingsForm(forms.ModelForm):
"""User subscription management."""
class Meta(object):
model = Profile
fields = Profile.SUBSCRI
|
uclouvain/osis_louvain
|
assessments/templatetags/score_display.py
|
Python
|
agpl-3.0
| 1,668
| 0.0018
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django import template
register = template.Library()
@register.filter
def score_display(value, decimal_option):
if value is None or str(value) == '-':
return ""
else:
try:
if decimal_option:
return "{0:.2f}".format(value)
else:
return "{0:.0f}".format(value)
        except (TypeError, ValueError):
return value
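# Template usage sketch ('{% load score_display %}' assumed from this module's path):
# {{ 14|score_display:True }} renders "14.00"; {{ 14|score_display:False }} renders "14".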
|
michaelhidalgo/7WCSQ
|
Tools/SQLMap/sqlmap/lib/core/enums.py
|
Python
|
apache-2.0
| 10,079
| 0.004465
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
class PRIORITY:
LOWEST = -100
LOWER = -50
LOW = -10
NORMAL = 0
HIGH = 10
HIGHER = 50
HIGHEST = 100
class SORT_ORDER:
FIRST = 0
SECOND = 1
THIRD = 2
FOURTH = 3
FIFTH = 4
LAST = 100
class DBMS:
ACCESS = "Microsoft Access"
DB2 = "IBM DB2"
FIREBIRD = "Firebird"
MAXDB = "SAP MaxDB"
MSSQL = "Microsoft SQL Server"
MYSQL = "MySQL"
ORACLE = "Oracle"
PGSQL = "PostgreSQL"
SQLITE = "SQLite"
SYBASE = "Sybase"
HSQLDB = "HSQLDB"
INFORMIX = "Informix"
class DBMS_DIRECTORY_NAME:
ACCESS = "access"
DB2 = "db2"
FIREBIRD = "firebird"
MAXDB = "maxdb"
MSSQL = "mssqlserver"
MYSQL = "mysql"
ORACLE = "oracle"
PGSQL = "postgresql"
SQLITE = "sqlite"
SYBASE = "sybase"
HSQLDB = "hsqldb"
INFORMIX = "informix"
class CUSTOM_LOGGING:
PAYLOAD = 9
TRAFFIC_OUT = 8
TRAFFIC_IN = 7
class OS:
LINUX = "Linux"
WINDOWS = "Windows"
class PLACE:
GET = "GET"
POST = "POST"
URI = "URI"
COOKIE = "Cookie"
USER_AGENT = "User-Agent"
REFERER = "Referer"
HOST = "Host"
CUSTOM_POST = "(custom) POST"
CUSTOM_HEADER = "(custom) HEADER"
class POST_HINT:
SOAP = "SOAP"
JSON = "JSON"
JSON_LIKE = "JSON-like"
MULTIPART = "MULTIPART"
XML = "XML (generic)"
ARRAY_LIKE = "Array-like"
class HTTPMETHOD:
GET = "GET"
POST = "POST"
HEAD = "HEAD"
PUT = "PUT"
DELETE = "DELETE"
TRACE = "TRACE"
OPTIONS = "OPTIONS"
CONNECT = "CONNECT"
PATCH = "PATCH"
class NULLCONNECTION:
HEAD = "HEAD"
RANGE = "Range"
SKIP_READ = "skip-read"
class REFLECTIVE_COUNTER:
MISS = "MISS"
HIT = "HIT"
class CHARSET_TYPE:
BINARY = 1
DIGITS = 2
HEXADECIMAL = 3
ALPHA = 4
ALPHANUM = 5
class HEURISTIC_TEST:
CASTED = 1
NEGATIVE = 2
POSITIVE = 3
class HASH:
MYSQL = r'(?i)\A\*[0-9a-f]{40}\Z'
MYSQL_OLD = r'(?i)\A(?![0-9]+\Z)[0-9a-f]{16}\Z'
POSTGRES = r'(?i)\Amd5[0-9a-f]{32}\Z'
MSSQL = r'(?i)\A0x0100[0-9a-f]{8}[0-9a-f]{40}\Z'
MSSQL_OLD = r'(?i)\A0x0100[0-9a-f]{8}[0-9a-f]{80}\Z'
    MSSQL_NEW = r'(?i)\A0x0200[0-9a-f]{8}[0-9a-f]{128}\Z'
ORACLE = r'(?i)\As:[0-9a-f]{60}\Z'
ORACLE_OLD = r'(?i)\A[01-9a-f]{16}\Z'
    MD5_GENERIC = r'(?i)\A[0-9a-f]{32}\Z'
SHA1_GENERIC = r'(?i)\A[0-9a-f]{40}\Z'
    SHA224_GENERIC = r'(?i)\A[0-9a-f]{56}\Z'
    SHA384_GENERIC = r'(?i)\A[0-9a-f]{96}\Z'
    SHA512_GENERIC = r'(?i)\A[0-9a-f]{128}\Z'
CRYPT_GENERIC = r'(?i)\A(?!\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z)(?![0-9]+\Z)[./0-9A-Za-z]{13}\Z'
WORDPRESS = r'(?i)\A\$P\$[./0-9A-Za-z]{31}\Z'
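# Identification sketch: each pattern anchors one complete hex digest, e.g.
# re.match(HASH.MD5_GENERIC, '5f4dcc3b5aa765d61d8327deb882cf99') matches
# (the MD5 of 'password'), while a bare 40-character digest only fits SHA1_GENERIC.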
# Reference: http://www.zytrax.com/tech/web/mobile_ids.html
class MOBILES:
BLACKBERRY = ("BlackBerry 9900", "Mozilla/5.0 (BlackBerry; U; BlackBerry 9900; en) AppleWebKit/534.11+ (KHTML, like Gecko) Version/7.1.0.346 Mobile Safari/534.11+")
GALAXY = ("Samsung Galaxy S", "Mozilla/5.0 (Linux; U; Android 2.2; en-US; SGH-T959D Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1")
HP = ("HP iPAQ 6365", "Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; 240x320; HP iPAQ h6300)")
HTC = ("HTC Sensation", "Mozilla/5.0 (Linux; U; Android 4.0.3; de-ch; HTC Sensation Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30")
IPHONE = ("Apple iPhone 4s", "Mozilla/5.0 (iPhone; CPU iPhone OS 5_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B179 Safari/7534.48.3")
NEXUS = ("Google Nexus 7", "Mozilla/5.0 (Linux; Android 4.1.1; Nexus 7 Build/JRO03D) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.166 Safari/535.19")
NOKIA = ("Nokia N97", "Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/10.0.012; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) WicKed/7.1.12344")
class PROXY_TYPE:
HTTP = "HTTP"
HTTPS = "HTTPS"
SOCKS4 = "SOCKS4"
SOCKS5 = "SOCKS5"
class REGISTRY_OPERATION:
READ = "read"
ADD = "add"
DELETE = "delete"
class DUMP_FORMAT:
CSV = "CSV"
HTML = "HTML"
SQLITE = "SQLITE"
class HTTP_HEADER:
ACCEPT = "Accept"
ACCEPT_CHARSET = "Accept-Charset"
ACCEPT_ENCODING = "Accept-Encoding"
ACCEPT_LANGUAGE = "Accept-Language"
AUTHORIZATION = "Authorization"
CACHE_CONTROL = "Cache-Control"
CONNECTION = "Connection"
CONTENT_ENCODING = "Content-Encoding"
CONTENT_LENGTH = "Content-Length"
CONTENT_RANGE = "Content-Range"
CONTENT_TYPE = "Content-Type"
COOKIE = "Cookie"
EXPIRES = "Expires"
HOST = "Host"
IF_MODIFIED_SINCE = "If-Modified-Since"
LAST_MODIFIED = "Last-Modified"
LOCATION = "Location"
PRAGMA = "Pragma"
PROXY_AUTHORIZATION = "Proxy-Authorization"
PROXY_CONNECTION = "Proxy-Connection"
RANGE = "Range"
REFERER = "Referer"
REFRESH = "Refresh" # Reference: http://stackoverflow.com/a/283794
SERVER = "Server"
SET_COOKIE = "Set-Cookie"
TRANSFER_ENCODING = "Transfer-Encoding"
URI = "URI"
USER_AGENT = "User-Agent"
VIA = "Via"
X_POWERED_BY = "X-Powered-By"
class EXPECTED:
BOOL = "bool"
INT = "int"
class OPTION_TYPE:
BOOLEAN = "boolean"
INTEGER = "integer"
FLOAT = "float"
STRING = "string"
class HASHDB_KEYS:
DBMS = "DBMS"
DBMS_FORK = "DBMS_FORK"
CHECK_WAF_RESULT = "CHECK_WAF_RESULT"
CONF_TMP_PATH = "CONF_TMP_PATH"
KB_ABS_FILE_PATHS = "KB_ABS_FILE_PATHS"
KB_BRUTE_COLUMNS = "KB_BRUTE_COLUMNS"
KB_BRUTE_TABLES = "KB_BRUTE_TABLES"
KB_CHARS = "KB_CHARS"
KB_DYNAMIC_MARKINGS = "KB_DYNAMIC_MARKINGS"
KB_INJECTIONS = "KB_INJECTIONS"
KB_ERROR_CHUNK_LENGTH = "KB_ERROR_CHUNK_LENGTH"
KB_XP_CMDSHELL_AVAILABLE = "KB_XP_CMDSHELL_AVAILABLE"
OS = "OS"
class REDIRECTION:
YES = "Y"
NO = "N"
class PAYLOAD:
SQLINJECTION = {
1: "boolean-based blind",
2: "error-based",
3: "inline query",
4: "stacked queries",
5: "AND/OR time-based blind",
6: "UNION query",
}
PARAMETER = {
1: "Unescaped numeric",
2: "Single quoted string",
3: "LIKE single quoted string",
4: "Double quoted string",
5: "LIKE double quoted string",
}
RISK = {
0: "No risk",
1: "Low risk",
2: "Medium risk",
3: "High risk",
}
CLAUSE = {
0: "Always",
1: "WHERE",
2: "GROUP BY",
3: "ORDER BY",
4: "LIMIT",
5: "OFFSET",
6: "TOP",
7: "Table name",
8: "Column name",
}
class METHOD:
COMPARISON = "comparison"
GREP = "grep"
TIME = "time"
UNION = "union"
class TECHNIQUE:
BOOLEAN = 1
ERROR = 2
QUERY = 3
STACKED = 4
TIME = 5
UNION = 6
class WHERE:
ORIGINAL = 1
NEGATIVE = 2
REPLACE = 3
class WIZARD:
BASIC = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba")
INTERMEDIATE = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba", "getUsers", "getDbs", "getTables", "getSchema", "excludeSysDbs")
ALL = ("getBanner", "getCurrentUser", "getCurrentDb", "isDba", "getHostname", "getUsers", "getPasswordHashes", "getPrivileges", "getRoles", "dumpAll")
class ADJUST_TIME_DELAY:
DISABLE = -1
NO = 0
YES = 1
class WEB_API:
PHP = "php"
ASP = "asp"
ASPX = "aspx"
JSP = "jsp"
class CONTENT_TYPE:
TARGET = 0
TECHNIQUES = 1
DBMS_FINGERPRINT = 2
BANNER = 3
CURRENT_USER = 4
CURRENT_DB = 5
HOSTNAME = 6
IS_DBA = 7
USERS = 8
PASSWORDS = 9
PRIVILEGES = 10
ROLES = 11
DBS = 12
|
ruchee/vimrc
|
vimfiles/bundle/vim-python/submodules/pylint/pylint/checkers/imports.py
|
Python
|
mit
| 38,570
| 0.001037
|
# Copyright (c) 2006-2015 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013 buck@yelp.com <buck@yelp.com>
# Copyright (c) 2014-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015-2016 Moises Lopez <moylop260@vauxoo.com>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Cezar <celnazli@bitdefender.com>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Noam Yorav-Raphael <noamraph@gmail.com>
# Copyright (c) 2015 James Morgensen <james.morgensen@gmail.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016 Jared Garst <cultofjared@gmail.com>
# Copyright (c) 2016 Maik Röder <maikroeder@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2017, 2020 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 Michka Popoff <michkapopoff@gmail.com>
# Copyright (c) 2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2017 Erik Wright <erik.wright@shopify.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 Hornwitser <github@hornwitser.no>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 Marianna Polatoglou <mpolatoglou@bloomberg.net>
# Copyright (c) 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Nick Smith <clickthisnick@users.noreply.github.com>
# Copyright (c) 2019 Paul Renvoisé <renvoisepaul@gmail.com>
# Copyright (c) 2020 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2021 Andrew Howe <howeaj@users.noreply.github.com>
# Copyright (c) 2021 Matus Valo <matusvalo@users.noreply.github.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""imports checkers for Python code"""
import collections
import copy
import os
import sys
from distutils import sysconfig
from typing import Dict, List
import astroid
from pylint.checkers import BaseChecker, DeprecatedMixin
from pylint.checkers.utils import (
check_messages,
is_from_fallback_block,
node_ignores_exception,
)
from pylint.exceptions import EmptyReportError
from pylint.graph import DotBackend, get_cycles
from pylint.interfaces import IAstroidChecker
from pylint.reporters.ureports.nodes import Paragraph, VerbatimText, VNode
from pylint.utils import IsortDriver, get_global_option
def _qualified_names(modname):
"""Split the names of the given module into subparts
For example,
_qualified_names('pylint.checkers.ImportsChecker')
returns
['pylint', 'pylint.checkers', 'pylint.checkers.ImportsChecker']
"""
names = modname.split(".")
return [".".join(names[0 : i + 1]) for i in range(len(names))]
def _get_import_name(importnode, modname):
"""Get a prepared module name from the given import node
In the case of relative imports, this will return the
absolute qualified module name, which might be useful
for debugging. Otherwise, the initial module name
is returned unchanged.
"""
if isinstance(importnode, astroid.ImportFrom):
if importnode.level:
root = importnode.root()
if isinstance(root, astroid.Module):
modname = root.relative_to_absolute_name(
modname, level=importnode.level
)
return modname
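# Example sketch: for 'from . import x' inside package 'pkg', the node has
# level=1 and the (empty) module name resolves to the absolute name 'pkg';
# a plain absolute 'import os' is returned unchanged.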
def _get_first_import(node, context, name, base, level, alias):
"""return the node where [base.]<name> is imported or None if not found"""
fullname = f"{base}.{name}" if base else name
first = None
found = False
for first in context.body:
if first is node:
continue
if first.scope() is node.scope() and first.fromlineno > node.fromlineno:
continue
if isinstance(first, astroid.Import):
if any(fullname == iname[0] for iname in first.names):
found = True
break
elif isinstance(first, astroid.ImportFrom):
if level == first.level:
for imported_name, imported_alias in first.names:
if fullname == f"{first.modname}.{imported_name}":
found = True
break
if (
name != "*"
and name == imported_name
and not (alias or imported_alias)
):
found = True
break
if found:
break
if found and not astroid.are_exclusive(first, node):
return first
return None
def _ignore_import_failure(node, modname, ignored_modules):
for submodule in _qualified_names(modname):
if submodule in ignored_modules:
return True
return node_ignores_exception(node, ImportError)
# utilities to represents import dependencies as tree and dot graph ###########
def _make_tree_defs(mod_files_list):
"""get a list of 2-uple (module, list_of_files_which_import_this_module),
it will return a dictionary to represent this as a tree
"""
tree_defs = {}
for mod, files in mod_files_list:
node = (tree_defs, ())
for prefix in mod.split("."):
node = node[0].setdefault(prefix, [{}, []])
node[1] += files
return tree_defs
def _repr_tree_defs(data, indent_str=None):
"""return a string which represents imports as a tree"""
lines = []
nodes = data.items()
for i, (mod, (sub, files)) in enumerate(sorted(nodes, key=lambda x: x[0])):
if not files:
files = ""
else:
files = "(%s)" % ",".join(sorted(files))
if indent_str is None:
lines.append(f"{mod} {files}")
sub_indent_str = " "
else:
lines.append(fr"{indent_str}\-{mod} {files}")
if i == len(nodes) - 1:
sub_indent_str = "%s " % indent_str
else:
sub_indent_str = "%s| " % indent_str
if sub:
lines.append(_repr_tree_defs(sub, sub_indent_str))
return "\n".join(lines)
def _dependencies_graph(filename: str, dep_info: Dict[str, List[str]]) -> str:
"""write dependencies as a dot (graphviz) file"""
done = {}
printer = DotBackend(os.path.splitext(os.path.basename(filename))[0], rankdir="LR")
printer.emit('URL="." node[shape="box"]')
for modname, dependencies in sorted(dep_info.items()):
done[modname] = 1
printer.emit_node(modname)
for depmodname in dependencies:
if depmodname not in done:
done[depmodname] = 1
printer.emit_node(depmodname)
for depmodname, dependencies in sorted(dep_info.items()):
for modname in dependencies:
printer.emit_edge(modname, depmodname)
return printer.generate(filename)
def _make_graph(filename: str, dep_info: Dict[str, List[str]], sect: VNode, gtype: str):
"""generate a dependencies graph and add some information about it in the
report's section
"""
outputfile = _dependencies_graph(filename, dep_info)
sect.append(Paragraph(f"{gtype}imports graph has been written to {outputfile}"))
# the import checker itself ###################################################
MSGS = {
"E0401": (
"Unable to import %s",
"import-error",
"Used when pylint has been unable to import a mo
|
chrizel/onpsx
|
src/onpsx/core/migrations/0004_auto__add_field_userrole_order__add_field_userprofile_order.py
|
Python
|
gpl-3.0
| 5,858
| 0.008194
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserRole.order'
db.add_column('core_userrole', 'order', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'UserProfile.order'
db.add_column('core_userprofile', 'order', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
def backwards(self, orm):
# Deleting field 'UserRole.order'
db.delete_column('core_userrole', 'order')
# Deleting field 'UserProfile.order'
db.delete_column('core_userprofile', 'order')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
        'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.category': {
'Meta': {'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'shortname': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'core.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'games': ('django.db.models.fields.TextField', [], {}),
'genre': ('django.db.models.fields.TextField', [], {}),
'hobbies': ('django.db.models.fields.TextField', [], {}),
'howfound': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'picurl': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.UserRole']"}),
'scene': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'core.userrole': {
'Meta': {'object_name': 'UserRole'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plural': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['core']
|
chipsecintel/chipsec
|
source/tool/chipsec/modules/tools/vmm/hv/define.py
|
Python
|
gpl-2.0
| 23,148
| 0.007776
|
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2016, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#chipsec@intel.com
#
"""
Hyper-V specific defines
"""
import re
msrs = {
0x40000000: 'HV_X64_MSR_GUEST_OS_ID',
0x40000001: 'HV_X64_MSR_HYPERCALL',
0x40000002: 'HV_X64_MSR_VP_INDEX',
0x40000003: 'HV_X64_MSR_RESET',
0x40000010: 'HV_X64_MSR_VP_RUNTIME',
0x40000020: 'HV_X64_MSR_TIME_REF_COUNT',
0x40000021: 'HV_X64_MSR_REFERENCE_TSC',
0x40000022: 'HV_X64_MSR_TSC_FREQUENCY',
0x40000023: 'HV_X64_MSR_APIC_FREQUENCY',
0x40000070: 'HV_X64_MSR_EOI',
0x40000071: 'HV_X64_MSR_ICR',
0x40000072: 'HV_X64_MSR_TPR',
0x40000073: 'HV_X64_MSR_APIC_ASSIST_PAGE',
0x40000080: 'HV_X64_MSR_SCONTROL',
0x40000081: 'HV_X64_MSR_SVERSION',
0x40000082: 'HV_X64_MSR_SIEFP',
0x40000083: 'HV_X64_MSR_SIMP',
0x40000084: 'HV_X64_MSR_EOM',
0x40000090: 'HV_X64_MSR_SINT0',
0x40000091: 'HV_X64_MSR_SINT1',
0x40000092: 'HV_X64_MSR_SINT2',
0x40000093: 'HV_X64_MSR_SINT3',
0x40000094: 'HV_X64_MSR_SINT4',
0x40000095: 'HV_X64_MSR_SINT5',
0x40000096: 'HV_X64_MSR_SINT6',
0x40000097: 'HV_X64_MSR_SINT7',
0x40000098: 'HV_X64_MSR_SINT8',
0x40000099: 'HV_X64_MSR_SINT9',
0x4000009A: 'HV_X64_MSR_SINT10',
0x4000009B: 'HV_X64_MSR_SINT11',
0x4000009C: 'HV_X64_MSR_SINT12',
0x4000009D: 'HV_X64_MSR_SINT13',
0x4000009E: 'HV_X64_MSR_SINT14',
0x4000009F: 'HV_X64_MSR_SINT15',
0x400000B0: 'HV_X64_MSR_STIMER0_CONFIG',
0x400000B1: 'HV_X64_MSR_STIMER0_COUNT',
0x400000B2: 'HV_X64_MSR_STIMER1_CONFIG',
0x400000B3: 'HV_X64_MSR_STIMER1_COUNT',
0x400000B4: 'HV_X64_MSR_STIMER2_CONFIG',
0x400000B5: 'HV_X64_MSR_STIMER2_COUNT',
0x400000B6: 'HV_X64_MSR_STIMER3_CONFIG',
0x400000B7: 'HV_X64_MSR_STIMER3_COUNT',
0x400000C1: 'HV_X64_MSR_POWER_STATE_TRIGGER_C1',
0x400000C2: 'HV_X64_MSR_POWER_STATE_TRIGGER_C2',
0x400000C3: 'HV_X64_MSR_POWER_STATE_TRIGGER_C3',
0x400000D1: 'HV_X64_MSR_POWER_STATE_CONFIG_C1',
0x400000D2: 'HV_X64_MSR_POWER_STATE_CONFIG_C2',
0x400000D3: 'HV_X64_MSR_POWER_STATE_CONFIG_C3',
0x400000E0: 'HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE',
0x400000E1: 'HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE',
0x400000E2: 'HV_X64_MSR_STATS_VP_RETAIL_PAGE',
0x400000E3: 'HV_X64_MSR_STATS_VP_INTERNAL_PAGE',
0x400000F0: 'HV_X64_MSR_GUEST_IDLE',
0x400000F1: 'HV_X64_MSR_SYNTH_DEBUG_CONTROL',
0x400000F2: 'HV_X64_MSR_SYNTH_DEBUG_STATUS',
0x400000F3: 'HV_X64_MSR_SYNTH_DEBUG_SEND_BUFFER',
0x400000F4: 'HV_X64_MSR_SYNTH_DEBUG_RECEIVE_BUFFER',
0x400000F5: 'HV_X64_MSR_SYNTH_DEBUG_PENDING_BUFFER',
0x40000100: 'HV_X64_MSR_CRASH_P0',
0x40000101: 'HV_X64_MSR_CRASH_P1',
0x40000102: 'HV_X64_MSR_CRASH_P2',
0x40000103: 'HV_X64_MSR_CRASH_P3',
0x40000104: 'HV_X64_MSR_CRASH_P4',
0x40000105: 'HV_X64_MSR_CRASH_CTL'
}
def get_msr_name(code, defvalue=''):
    return msrs.get(code, defvalue)
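# e.g. get_msr_name(0x40000000) -> 'HV_X64_MSR_GUEST_OS_ID', and unknown
# codes fall back to the default: get_msr_name(0x12345678, '?') -> '?'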
hypercall_status_codes = {
0x0000: 'HV_STATUS_SUCCESS',
0x0001: 'HV_RESERVED_01H',
0x0002: 'HV_STATUS_INVALID_HYPERCALL_CODE',
0x0003: 'HV_STATUS_INVALID_HYPERCALL_INPUT',
0x0004: 'HV_STATUS_INVALID_ALIGNMENT',
0x0005: 'HV_STATUS_INVALID_PARAMETER',
0x0006: 'HV_STATUS_ACCESS_DENIED',
0x0007: 'HV_STATUS_INVALID_PARTITION_STATE',
0x0008: 'HV_STATUS_OPERATION_DENIED',
0x0009: 'HV_STATUS_UNKNOWN_PROPERTY',
0x000A: 'HV_STATUS_PROPERTY_VALUE_OUT_OF_RANGE',
0x000B: 'HV_STATUS_INSUFFICIENT_MEMORY',
0x000C: 'HV_STATUS_PARTITION_TOO_DEEP',
0x000D: 'HV_STATUS_INVALID_PARTITION_ID',
0x000E: 'HV_STATUS_INVALID_VP_INDEX',
0x000F: 'HV_RESERVED_0FH',
0x0010: 'HV_RESERVED_10H',
0x0011: 'HV_STATUS_INVALID_PORT_ID',
0x0012: 'HV_STATUS_INVALID_CONNECTION_ID',
0x0013: 'HV_STATUS_INSUFFICIENT_BUFFERS',
0x0014: 'HV_STATUS_NOT_ACKNOWLEDGED',
0x0015: 'HV_RESERVED_15H',
0x0016: 'HV_STATUS_ACKNOWLEDGED',
0x0017: 'HV_STATUS_INVALID_SAVE_RESTORE_STATE',
0x0018: 'HV_STATUS_INVALID_SYNIC_STATE',
0x0019: 'HV_STATUS_OBJECT_IN_USE',
0x001A: 'HV_STATUS_INVALID_PROXIMITY_DOMAIN_INFO',
0x001B: 'HV_STATUS_NO_DATA',
0x001C: 'HV_STATUS_INACTIVE',
0x001D: 'HV_STATUS_NO_RESOURCES',
0x001E: 'HV_STATUS_FEATURE_UNAVAILABLE',
0x001F: 'HV_STATUS_PARTIAL_PACKET',
0x0020: 'HV_STATUS_PROCESSOR_FEATURE_SSE3_NOT_SUPPORTED',
0x0021: 'HV_STATUS_PROCESSOR_FEATURE_LAHFSAHF_NOT_SUPPORTED',
0x0022: 'HV_STATUS_PROCESSOR_FEATURE_SSSE3_NOT_SUPPORTED',
0x0023: 'HV_STATUS_PROCESSOR_FEATURE_SSE4_1_NOT_SUPPORTED',
0x0024: 'HV_STATUS_PROCESSOR_FEATURE_SSE4_2_NOT_SUPPORTED',
0x0025: 'HV_STATUS_PROCESSOR_FEATURE_SSE4A_NOT_SUPPORTED',
0x0026: 'HV_STATUS_PROCESSOR_FEATURE_XOP_NOT_SUPPORTED',
0x0027: 'HV_STATUS_PROCESSOR_FEATURE_POPCNT_NOT_SUPPORTED',
0x0028: 'HV_STATUS_PROCESSOR_FEATURE_CMPXCHG16B_NOT_SUPPORTED',
0x0029: 'HV_STATUS_PROCESSOR_FEATURE_ALTMOVCR8_NOT_SUPPORTED',
0x002A: 'HV_STATUS_PROCESSOR_FEATURE_LZCNT_NOT_SUPPORTED',
0x002B: 'HV_STATUS_PROCESSOR_FEATURE_MISALIGNED_SSE_NOT_SUPPORTED',
0x002C: 'HV_STATUS_PROCESSOR_FEATURE_MMX_EXT_NOT_SUPPORTED',
0x002D: 'HV_STATUS_PROCESSOR_FEATURE_3DNOW_NOT_SUPPORTED',
0x002E: 'HV_STATUS_PROCESSOR_FEATURE_EXTENDED_3DNOW_NOT_SUPPORTED',
0x002F: 'HV_STATUS_PROCESSOR_FEATURE_PAGE_1GB_NOT_SUPPORTED',
0x0030: 'HV_STATUS_PROCESSOR_CACHE_LINE_FLUSH_SIZE_INCOMPATIBLE',
0x0031: 'HV_STATUS_PROCESSOR_FEATURE_XSAVE_NOT_SUPPORTED',
0x0032: 'HV_STATUS_PROCESSOR_FEATURE_XSAVEOPT_NOT_SUPPORTED',
0x0033: 'HV_STATUS_INSUFFICIENT_BUFFER',
0x0034: 'HV_STATUS_PROCESSOR_FEATURE_XSAVE_AVX_NOT_SUPPORTED',
0x0035: 'HV_STATUS_PROCESSOR_FEATURE_XSAVE_FEATURE_NOT_SUPPORTED',
0x0036: 'HV_STATUS_PROCESSOR_XSAVE_SAVE_AREA_INCOMPATIBLE',
0x0037: 'HV_STATUS_INCOMPATIBLE_PROCESSOR',
0x0038: 'HV_STATUS_INSUFFICIENT_DEVICE_DOMAINS',
0x0039: 'HV_STATUS_PROCESSOR_FEATURE_AES_NOT_SUPPORTED',
0x003A: 'HV_STATUS_PROCESSOR_FEATURE_PCLMULQDQ_NOT_SUPPORTED',
0x003B: 'HV_STATUS_PROCESSOR_FEATURE_INCOMPATIBLE_XSAVE_FEATURES',
0x003C: 'HV_STATUS_CPUID_FEATURE_VALIDATION_ERROR',
0x003D: 'HV_STATUS_CPUID_XSAVE_FEATURE_VALIDATION_ERROR',
0x003E: 'HV_STATUS_PROCESSOR_STARTUP_TIMEOUT',
0x003F: 'HV_STATUS_SMX_ENABLED',
0x0040: 'HV_STATUS_PROCESSOR_FEATURE_PCID_NOT_SUPPORTED',
0x0041: 'HV_STATUS_INVALID_LP_INDEX',
0x0042: 'HV_STATUS_FEATURE_FMA4_NOT_SUPPORTED',
0x0043: 'HV_STATUS_FEATURE_F16C_NOT_SUPPORTED',
0x0044: 'HV_STATUS_PROCESSOR_FEATURE_RDRAND_NOT_SUPPORTED',
0x0045: 'HV_STATUS_PROCESSOR_FEATURE_RDWRFSGS_NOT_SUPPORTED',
0x00
|
ketch/effective_dispersion_RR
|
numerical_experiments/1D-propagation/FV_solution/sinusoidal/normal/write_slice.py
|
Python
|
bsd-2-clause
| 788
| 0.045685
|
from clawpack.petclaw.solution import Solution
#from petclaw.io.petsc import read_petsc
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as pl
from matplotlib import rc
#rc('text', usetex=True)
import numpy as np
import os
def write_slices(frame, file_prefix, path, name):
sol=Solution(frame,file_format='petsc',path=path,read_aux=False,file_prefix=file_prefix)
x=sol.state.grid.x.centers; y=sol.state.grid.y.centers; my=len(y)
q=sol.state.q
f=open(name+'.txt','w')
#f.writelines(str(xc)+" "+str(q[0,i,my/4])+" "+str(q[0,i,3*my/4])+"\n" for i,xc in enumerate(x))
f.writelines(str(xc)+" "+str(sum(q[0,i,:])/my)+"\n" for i,xc in enumerate(x))
f.close()
if __name__== "__main__":
write_slices(970,'claw_p','./_output/_p/','stress_normal')
|
nonZero/demos-python
|
src/examples/short/technology/grep.py
|
Python
|
gpl-3.0
| 459
| 0
|
#!/usr/bin/python2
'''
Implementing grep in python in less than 10 lines of code...
'''
import re # for compile, match
import sys # for argv
# command line usage...
if len(sys.argv) < 3:
print('usage: grep.py [expr] [files...]')
sys.exit(1)
# first compile the regular expression...
c = re.compile(sys.argv[1])
for filename in sys.argv[2:]:
for num, l in enumerate(open(filename)):
if c.match(l):
print(filename, num, l)
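# Note: re.match only tests the start of each line; for grep's usual
# "match anywhere in the line" behaviour, c.search(l) is the closer equivalent.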
| |
ICOS-Carbon-Portal/infrastructure
|
devops/roles/icos.jupyter/files/jusers.py
|
Python
|
gpl-3.0
| 11,637
| 0.001203
|
#!/opt/jusers/venv/bin/python3
import grp
import os
import pwd
import random
import spwd
import subprocess
import sys
import click
from concurrent.futures import ProcessPoolExecutor
from ruamel.yaml import YAML
PROJECT = '/project'
DRY_RUN = False
VERBOSE = False
USERSDB = '/root/jusers.yml'
PWDLENG = 10
# PLUGINS
# https://click.palletsprojects.com/en/7.x/commands/?highlight=plug#custom-multi-commands
class Plugins(click.MultiCommand):
folder = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'plugins')
def list_commands(self, ctx):
rv = []
for filename in os.listdir(self.folder):
if filename.endswith('.py'):
rv.append(filename[:-3])
rv.sort()
return rv
def get_command(self, ctx, name):
fn = os.path.join(self.folder, name + '.py')
ns = {'__file__': fn}
with open(fn) as f:
code = compile(f.read(), fn, 'exec')
eval(code, ns, ns)
return ns['cli']
# UTIL
def join(*args):
return os.path.join(*args)
def exists(*args):
return os.path.exists(os.path.join(*args))
def readlink(*args):
try:
return os.readlink(os.path.join(*args))
except Exception:
return None
# DOCKER
def user_database_differs():
c = 'sha1sum /etc/passwd /etc/shadow /etc/group'
r = subprocess.run('diff -Zq <(docker exec -t hub %s) <(%s) > /dev/null' % (
c, c), check=False, shell=True, executable='/bin/bash')
return r.returncode != 0
def maybe_restart_hub(force=False):
if user_database_differs() or force:
yield "cd /docker/jupyter && docker-compose restart hub"
def docker_id(user):
r = subprocess.run('docker exec jupyter-%s id %s' % (user, user),
shell=True, check=False, capture_output=True, text=1)
if r.returncode == 1 and "No such container" in r.stderr:
return None
if r.returncode == 1:
raise Exception(r.stderr)
return r.stdout.strip()
# USERS
def list_users():
users = []
for pw in pwd.getpwall():
if is_normal_user(pw.pw_name, pw.pw_uid):
users.append(pw.pw_name)
return sorted(users)
def list_passwordless_users(yaml):
for u in yaml['users']:
l = u['login']
sp = spwd.getspnam(l)
if sp.sp_pwdp in ('!', '*'):
yield l
def is_normal_user(user, uid=None):
if uid is None:
try:
uid = pwd.getpwnam(user).pw_uid
except KeyError: # returned for non-existing users
return False
return uid >= 1000 and user not in ['nobody', 'project']
# GROUPS
def is_group(name):
try:
grp.getgrnam(name)
return True
except KeyError:
return False
def list_project_groups():
return sorted(g.gr_name for g in grp.getgrall()
if exists(PROJECT, g.gr_name))
def list_user_groups(user):
try:
pw_gid = pwd.getpwnam(user).pw_gid
except KeyError: # user doesn't exist
return []
else:
return [g.gr_name for g in grp.getgrall()
if (g.gr_gid == pw_gid or user in g.gr_mem)
and exists(PROJECT, g.gr_name)
and g.gr_name != 'common']
def reset_passwords(reset):
ps = []
for u in reset:
p = random.randint(1*(10**PWDLENG), 1*(10**(PWDLENG+1))-1)
ps.append((u, p))
ls = '\n'.join('%s:%s' % (u, p) for u, p in ps)
subprocess.run(['/sbin/chpasswd'], input=ls, check=1, text=1)
print("Reset passwords:\n%s" % '\n'.join(' %-15s%s' % (u, p) for u, p in ps))
# GENERATE COMMANDS
def add_group(group):
if is_group(group):
yield "echo %s already exists" % group
return
if is_normal_user(group):
yield 'abort %s is the name of a regular user' % group
return
r = join(PROJECT, group)
if exists(r):
yield "abort %s exists although the group does not" % group
return
yield 'addgroup %s' % group
yield 'mkdir -p %s/store' % r
yield 'chmod 2770 %s' % r
yield 'chown project:%s %s' % (group, r)
yield 'echo added group %s' % group
def maybe_kick_all_users():
# First list all jupyter containers that are user notebook servers and
# build a dict that maps username to the output of "id user" on the host.
users = {}
s = subprocess.check_output(['docker', 'ps'], text=1)
for line in s.splitlines():
words = line.split()
if words[-1].startswith('jupyter-'):
_, user = words[-1].split('-', 1)
hid = subprocess.check_output(['id', user], text=1).strip()
users[user] = hid
# Next go through the usernames and spawn a background process that runs
# 'docker id jupyter-user id user'. This means that on a busy server we
# might start 50-100 'docker exec' processes.
# We then compare the output of 'id user' on the host vs in docker.
kick = []
with ProcessPoolExecutor() as exe:
jobs = [(user, exe.submit(docker_id, user)) for user in users]
for user, job in jobs:
try:
dock = job.result()
host = users[user]
if dock != host :
kick.append(('yesno "%s\'s user credentials have changed, '
'shut down their server? " && '
'docker rm -f jupyter-%s || :' % (user, user)))
except Exception as e:
print(user, "failed with", e)
return kick
def sync_new_groups(yaml):
e_groups = list_project_groups()
y_groups = yaml['groups']
for ng in set(y_groups) - set(e_groups):
yield from add_group(ng)
def sync_new_users(yaml):
y_users = yaml['users']
e_users = list_users()
for u in y_users:
l = u['login']
if l in e_users:
continue
yield 'adduser --disabled-password %s --shell /usr/sbin/nologin --gecos ""' % l
def sync_user_groups(yaml):
for u in yaml['users']:
l = u['login']
for g in set(u.get('groups', [])) - set(list_user_groups(l)):
yield 'adduser %s %s' % (l, g)
def sync_homedirs(yaml):
for u in yaml['users']:
login = u['login']
if not is_normal_user(login):
continue
home = pwd.getpwnam(login).pw_dir
pcom = join(PROJECT, 'common')
if readlink(home, 'common') != pcom:
yield 'ln -sf %s %s' % (pcom, home)
project = join(home, 'project')
if not exists(project):
yield 'mkdir -pm 0755 %s' % project
for g in list_user_groups(login):
p = join(PROJECT, g)
if readlink(project, g) != p:
yield 'ln -sf %s %s' % (p, project)
# RUN
def run(*gens):
flags = "set -eu"
if VERBOSE:
flags += "x"
init = ["#!/bin/bash", flags,
'error () { echo "$@" > /dev/stderr; }',
'abort () { echo "$@" > /dev/stderr; exit 1; }',
'yesno () { read -p "$* [yn]: " a; [[ $a == y ]]; }']
    if DRY_RUN:
cmds = [l for g in gens for l in g]
if len(cmds) == 0:
return print("Everything is synced.")
else:
return print('\n'.join(cmds))
ncs = 0
for g in gens:
cmds = init[:]
for line in g:
cmds.append(line)
ncs += 1
prog = '\n'.join(cmds)
r = subprocess.run(['/bin/bash', '-c', prog])
if r.returncode != 0:
print("abor
|
t.")
sys.exit(1)
if ncs == 0:
print("Everything is synced.")
# CLI
@click.group()
def cli():
pass
@cli.command()
@click.option('--force', '-f', is_flag=True, help="force restart")
def restart_hub(force):
"""Restart the hub.
All users are managed on the host system. The jupyter hub then keeps a copy
of the user database. This means that if the user database is updated,
then the hub needs to be restarted for those changes to take effect.
Running this command will restart the hub, but only if the hub doesn't have
the latest user database.
\b
$ jusers restart-hub
Every
|
mwisslead/vfp2py
|
testbed/test_lib.py
|
Python
|
mit
| 13,352
| 0.002097
|
# coding=utf-8
from __future__ import division, print_function
import datetime as dt
import math
import os
import random
import faker
from vfp2py import vfpfunc
from vfp2py.vfpfunc import DB, Array, C, F, M, S, lparameters, parameters, vfpclass
@lparameters()
def MAIN():
pass
@lparameters()
def select_tests():
assert DB.select_function(0 if vfpfunc.set('compatible') == 'OFF' else None) == 1
assert DB.select_function(0) == 1
assert DB.select_function(1) == 32767
assert DB.select_function(2) == 0
assert DB.select_function('test') == 0
@lparameters()
def chr_tests():
assert ord('\x00'[0]) == 0
@lparameters()
def set_tests():
assert vfpfunc.set('compatible') == 'OFF'
assert vfpfunc.set('compatible', 1) == 'PROMPT'
@lparameters()
def used_tests():
assert DB.used('test') == False
@lparameters()
def date_tests():
M.add_local('somedate')
S.somedate = dt.date(2017, 6, 30)
assert S.somedate == dt.date(2017, 6, 30)
assert vfpfunc.dow_fix(S.somedate.weekday()) == 6
assert S.somedate.strftime('%A') == 'Friday'
assert S.somedate.month == 6
assert S.somedate.strftime('%B') == 'June'
assert S.somedate.strftime('%d %B %Y') == '30 June 2017'
assert vfpfunc.dtos(S.somedate) == '20170630'
assert vfpfunc.dtoc(S.somedate) == '06/30/2017'
assert len(dt.datetime.now().time().strftime('%H:%M:%S')) == 8
assert len(dt.datetime.now().time().strftime('%H:%M:%S.%f')[:11]) == 11
assert dt.datetime.combine(S.somedate, dt.datetime.min.time()) == dt.datetime(2017, 6, 30, 0)
assert vfpfunc.gomonth(S.somedate, -4) == dt.date(2017, 2, 28)
assert vfpfunc.vartype(S.somedate) == 'D'
assert vfpfunc.vartype(dt.datetime.combine(S.somedate, dt.datetime.min.time())) == 'T'
@lparameters()
def math_tests():
M.add_local('num_value')
S.num_value = math.pi
assert round(math.pi, 2) == 3.14
assert abs(math.tan(math.radians(45)) - 1) < 0.001
assert abs(math.sin(math.radians(90)) - 1) < 0.001
assert abs(math.cos(math.radians(90)) - 0) < 0.001
assert abs(math.cos(math.radians(45)) - math.sqrt(2) / 2) < 0.001
assert 0 < random.random() and random.random() < 1
assert (5 % 2) == 1
M.add_local('stringval')
S.stringval = '1e5'
assert float(S.stringval) == 100000
assert vfpfunc.vartype(S.num_value) == 'N'
assert not ((True or True) and False)
assert True or False and True
@lparameters()
def string_tests():
S.cstring = 'AAA aaa, BBB bbb, CCC ccc.'
assert vfpfunc.vartype(S.cstring) == 'C'
assert len([w for w in S.cstring.split() if w]) == 6
assert len([w for w in S.cstring.split(',') if w]) == 3
assert len([w for w in S.cstring.split('.') if w]) == 1
assert vfpfunc.getwordnum(S.cstring, 2) == 'aaa,'
assert vfpfunc.getwordnum(S.cstring, 2, ',') == ' BBB bbb'
    assert vfpfunc.getwordnum(S.cstring, 2, '.') == ''
assert vfpfunc.like('Ab*t.???', 'About.txt')
assert not vfpfunc.like('Ab*t.???', 'about.txt')
assert not ''[:1].isalpha()
assert 'a123'[:1].isalpha()
assert not '1abc'[:1].isalpha()
assert not ''[:1].islower()
assert 'test'[:1].islower()
assert not 'Test'[:1].islower()
assert not ''[:1].isdigit()
assert '1abc'[:1].isdigit()
assert not 'a123'[:1].isdigit()
assert not ''[:1].isupper()
assert 'Test'[:1].isupper()
assert not 'test'[:1].isupper()
assert vfpfunc.isblank('')
assert not vfpfunc.isblank('test')
assert vfpfunc.isblank(None)
S.cstring = ' AAA '
assert S.cstring.strip() == 'AAA'
assert S.cstring.rstrip() == ' AAA'
assert S.cstring.lstrip() == 'AAA '
assert S.cstring.rstrip() == S.cstring.rstrip()
assert vfpfunc.strextract('This {{is}} a {{template}}', '{{', '}}') == 'is'
assert vfpfunc.strextract('This {{is}} a {{template}}', '{{', '}}', 2) == 'template'
assert vfpfunc.strextract('This {{is}} a {{template}}', '{{is}}') == ' a {{template}}'
assert vfpfunc.strextract('This {{is}} a {{template}}', '{{IS}}', '', 1, 1) == ' a {{template}}'
assert '123AAbbB'.lower().find('aab'.lower()) + 1 == 4
S.cstring = vfpfunc.text([' 123AAbbbB',
' TESTTEST',
' TEXTLINES',
' '], show=False)
assert S.cstring == '123AAbbbBTESTTESTTEXTLINES'
S.cstring = '123AAbbbB\r\nTESTTEST\r\nTEXTLINES'
assert vfpfunc.atline('T', S.cstring) == 2
assert vfpfunc.ratline('T', S.cstring) == 3
@lparameters()
def path_tests():
assert vfpfunc.home() == os.getcwd()
S.handle = open('test_lib_file', 'w')
S.handle.close()
assert not vfpfunc.isblank(vfpfunc.locfile('test_lib_file'))
os.chdir('..')
assert vfpfunc.home() != os.getcwd()
assert not vfpfunc.isblank(vfpfunc.locfile('test_lib_file'))
os.remove(os.path.join(vfpfunc.home(), 'test_lib_file'))
@lparameters()
def misc_tests():
assert vfpfunc.version() == 'Not FoxPro 9'
assert vfpfunc.version(4) == vfpfunc.version()
assert vfpfunc.version(5) == 900
@lparameters('seed')
def _add_db_record():
M.add_local('fake', 'fake_name', 'fake_st', 'fake_quantity', 'fake_received')
S.fake = faker.Faker()
S.fake.seed(S.seed)
S.fake_name = S.fake.name()
S.fake_st = S.fake.state_abbr()
S.fake_quantity = S.fake.random_int(0, 100)
S.fake_received = S.fake.boolean()
DB.insert('report', (S.fake_name, S.fake_st, S.fake_quantity, S.fake_received))
@lparameters('sqlconn', 'seed')
def _sqlexec_add_record():
M.add_local('fake', 'fake_name', 'fake_st', 'fake_quantity', 'fake_received')
S.fake = faker.Faker()
S.fake.seed(S.seed)
S.fake_name = S.fake.name()
S.fake_st = S.fake.state_abbr()
S.fake_quantity = S.fake.random_int(0, 100)
S.fake_received = S.fake.boolean()
S.sqlcmd = "insert into REPORT values ('" + S.fake_name + "','" + S.fake_st + "'," + vfpfunc.str(S.fake_quantity).strip() + ',' + vfpfunc.str(int(S.fake_received)).strip() + ')'
print(S.sqlcmd)
return vfpfunc.sqlexec(S.sqlconn, S.sqlcmd)
@lparameters()
def database_tests():
# FIX ME: SET SAFETY OFF
# FIX ME: SET ASSERTS ON
try:
DB.create_table('report', 'name c(50); st c(2); quantity n(5, 0); received l', 'free')
assert os.path.isfile('report.dbf')
assert DB.used('report')
try:
DB.use('report', 0, 'shared')
assert False
except Exception as err:
S.oerr = vfpfunc.Exception.from_pyexception(err)
print(S.oerr.message)
assert S.oerr.message == 'File is in use.'
_add_db_record(0)
_add_db_record(1)
_add_db_record(2)
_add_db_record(3)
assert DB.cpdbf() == 0
assert DB.fcount() == 4
DB.alter_table('report', 'add', 'age n(3, 0)')
assert DB.fcount() == 5
assert DB.field(2) == 'st'
assert not DB.found()
DB.goto(None, 0)
M.add_local('loopcount')
S.loopcount = 0
for _ in DB.scanner(scope=('rest',)):
assert len(S.name.strip()) > 0
S.loopcount += 1
assert S.loopcount == 4
DB.goto(None, 3)
S.loopcount = 0
for _ in DB.scanner(scope=('all',), condition=lambda: S.st.strip() == 'ID'):
assert len(S.name.strip()) > 0
S.loopcount += 1
assert S.loopcount == 2
S.loopcount = 0
for _ in DB.scanner(scope=('rest',), condition=lambda: S.st.strip() == 'ID'):
assert len(S.name.strip()) > 0
S.loopcount += 1
assert S.loopcount == 0
DB.goto(None, 0)
S.loopcount = 0
for _ in DB.scanner(scope=('rest',), condition=lambda: S.st.strip() == 'ID'):
assert len(S.name.strip()) > 0
S.loopcount += 1
assert S.loopcount == 2
del M.loopcount
assert S.name.strip() == 'Norma Fisher', S.name.strip() + ' should be Norma Fisher'
assert DB.recno() == 1
S.report_record = vfpfunc.scatter(totype='name')
assert S.report_record.name.strip() == 'Norma Fisher', S.report_record.name.str
|
qPCR4vir/orange3
|
Orange/widgets/classify/owsvmclassification.py
|
Python
|
bsd-2-clause
| 7,592
| 0.000264
|
from collections import OrderedDict
from PyQt4 import QtGui
from PyQt4.QtCore import Qt
from Orange.data import Table
from Orange.classification.svm import SVMLearner, NuSVMLearner
from Orange.widgets import settings, gui
from Orange.widgets.utils.owlearnerwidget import OWBaseLearner
class OWBaseSVM(OWBaseLearner):
#: Kernel types
Linear, Poly, RBF, Sigmoid = 0, 1, 2, 3
#: Selected kernel type
kernel_type = settings.Setting(RBF)
#: kernel degree
degree = settings.Setting(3)
#: gamma
gamma = settings.Setting(1.0)
    #: coef0 (additive constant)
coef0 = settings.Setting(0.0)
#: numerical tolerance
tol = settings.Setting(0.001)
kernels = (("Linear", "x⋅y"),
("Polynomial", "(g x⋅y + c)<sup>d</sup>"),
("RBF", "exp(-g|x-y|²)"),
("Sigmoid", "tanh(g x⋅y + c)"))
def _add_kernel_box(self):
# Initialize with the widest label to measure max width
self.kernel_eq = self.kernels[-1][1]
self.kernel_box = box = gui.hBox(self.controlArea, "Kernel")
buttonbox = gui.radioButtonsInBox(
box, self, "kernel_type", btnLabels=[k[0] for k in self.kernels],
callback=self._on_kernel_changed, addSpace=20)
buttonbox.layout().setSpacing(10)
gui.rubber(buttonbox)
parambox = gui.vBox(box)
gui.label(parambox, self, "Kernel: %(kernel_eq)s")
common = dict(orientation=Qt.Horizontal, callback=self.settings_changed,
alignment=Qt.AlignRight, controlWidth=80)
spbox = gui.hBox(parambox)
gui.rubber(spbox)
inbox = gui.vBox(spbox)
gamma = gui.doubleSpin(
inbox, self, "gamma", 0.0, 10.0, 0.01, label=" g: ", **common)
coef0 = gui.doubleSpin(
inbox, self, "coef0", 0.0, 10.0, 0.01, label=" c: ", **common)
degree = gui.doubleSpin(
inbox, self, "degree", 0.0, 10.0, 0.5, label=" d: ", **common)
self._kernel_params = [gamma, coef0, degree]
gui.rubber(parambox)
# This is the maximal height (all double spins are visible)
# and the maximal width (the label is initialized to the widest one)
box.layout().activate()
box.setFixedHeight(box.sizeHint().height())
box.setMinimumWidth(box.sizeHint().width())
def _add_optimization_box(self):
self.optimization_box = gui.vBox(
self.controlArea, "Optimization Parameters")
gui.doubleSpin(
self.optimization_box, self, "tol", 1e-6, 1.0, 1e-5,
label="Numerical tolerance:",
decimals=6, alignment=Qt.AlignRight, controlWidth=100,
callback=self.settings_changed
)
def add_main_layout(self):
self._add_type_box()
self._add_kernel_box()
self._add_optimization_box()
def _on_kernel_changed(self):
enabled = [[False, False, False], # linear
[True, True, True], # poly
[True, False, False], # rbf
[True, True, False]] # sigmoid
self.kernel_eq = self.kernels[self.kernel_type][1]
mask = enabled[self.kernel_type]
for spin, enabled in zip(self._kernel_params, mask):
[spin.box.hide, spin.box.show][enabled]()
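            # bool subclasses int, so [hide, show][False] calls hide() and
            # [hide, show][True] calls show().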
self.settings_changed()
def _report_kernel_parameters(self, items):
if self.kernel_type == 0:
items["Kernel"] = "Linear"
elif self.kernel_type == 1:
items["Kernel"] = \
"Polynomial, ({g:.4} x⋅y + {c:.4})<sup>{d}</sup>".format(
g=self.gamma, c=self.coef0, d=self.degree)
elif self.kernel_type == 2:
items["Kernel"] = "RBF, exp(-{:.4}|x-y|²)".format(self.gamma)
else:
items["Kernel"] = "Sigmoid, tanh({g:.4} x⋅y + {c:.4})".format(
g=self.gamma, c=self.coef0)
def update_model(self):
super().update_model()
sv = None
if self.valid_data:
sv = self.data[self.model.skl_model.support_]
self.send("Support vectors", sv)
class OWSVMClassification(OWBaseSVM):
name = "SVM"
description = "Support Vector Machines map inputs to higher-dimensional " \
"feature spaces that best separate different classes. "
icon = "icons/SVM.svg"
priority = 50
LEARNER = SVMLearner
outputs = [("Support vectors", Table)]
# 0: c_svc, 1: nu_svc
svmtype = settings.Setting(0)
C = settings.Setting(1.0)
nu = settings.Setting(0.5)
    shrinking = settings.Setting(True)
probability = settings.Setting(False)
max_iter = settings.Setting(100)
limit_iter = settings.Setting(True)
def _add_type_box(self):
form = QtGui.QGridLayout()
self.type_box = box = gui.radioButtonsInBox(
self.controlArea, self, "svmtype", [], box="SVM Type",
orientation=form, callback=self.settings_changed)
form.addWidget(gui.appendRadioButton(box, "C-SVM", addToLayout=False),
0, 0, Qt.AlignLeft)
form.addWidget(QtGui.QLabel("Cost (C):"),
0, 1, Qt.AlignRight)
form.addWidget(gui.doubleSpin(box, self, "C", 1e-3, 1000.0, 0.1,
decimals=3, alignment=Qt.AlignRight,
controlWidth=80, addToLayout=False,
callback=self.settings_changed),
0, 2)
form.addWidget(gui.appendRadioButton(box, "ν-SVM", addToLayout=False),
1, 0, Qt.AlignLeft)
form.addWidget(QtGui.QLabel("Complexity (ν):"),
1, 1, Qt.AlignRight)
form.addWidget(gui.doubleSpin(box, self, "nu", 0.05, 1.0, 0.05,
decimals=2, alignment=Qt.AlignRight,
controlWidth=80, addToLayout=False,
callback=self.settings_changed),
1, 2)
def _add_optimization_box(self):
super()._add_optimization_box()
gui.spin(self.optimization_box, self, "max_iter", 50, 1e6, 50,
label="Iteration limit:", checked="limit_iter",
alignment=Qt.AlignRight, controlWidth=100,
callback=self.settings_changed)
def create_learner(self):
kernel = ["linear", "poly", "rbf", "sigmoid"][self.kernel_type]
common_args = dict(
kernel=kernel,
degree=self.degree,
gamma=self.gamma,
coef0=self.coef0,
tol=self.tol,
max_iter=self.max_iter if self.limit_iter else -1,
probability=True,
preprocessors=self.preprocessors
)
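        # NOTE: probability is hard-coded to True here; the 'probability'
        # setting declared on the class is not consulted when building the
        # learner.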
if self.svmtype == 0:
return SVMLearner(C=self.C, **common_args)
else:
return NuSVMLearner(nu=self.nu, **common_args)
def get_learner_parameters(self):
items = OrderedDict()
if self.svmtype == 0:
items["SVM type"] = "C-SVM, C={}".format(self.C)
else:
items["SVM type"] = "ν-SVM, ν={}".format(self.nu)
self._report_kernel_parameters(items)
items["Numerical tolerance"] = "{:.6}".format(self.tol)
items["Iteration limt"] = self.max_iter if self.limit_iter else "unlimited"
return items
if __name__ == "__main__":
app = QtGui.QApplication([])
w = OWSVMClassification()
w.set_data(Table("iris")[:50])
w.show()
app.exec_()
|
tomduijf/home-assistant
|
homeassistant/components/demo.py
|
Python
|
mit
| 5,021
| 0
|
"""
homeassistant.components.demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sets up a demo environment that mimics interaction with devices.
"""
import time
import homeassistant.core as ha
import homeassistant.bootstrap as bootstrap
import homeassistant.loader as loader
from homeassistant.const import (
CONF_PLATFORM, ATTR_ENTITY_PICTURE, ATTR_ENTITY_ID, ATTR_FRIENDLY_NAME)
DOMAIN = "demo"
DEPENDENCIES = ['introduction', 'conversation']
COMPONENTS_WITH_DEMO_PLATFORM = [
'switch', 'light', 'sensor', 'thermostat', 'media_player', 'notify']
def setup(hass, config):
""" Setup a demo environment. """
    group = loader.get_component('group')
configurator = loader.get_component('configurator')
config.setdefault(ha.DOMAIN, {})
config.setdefault(DOMAIN, {})
if config[DOMAIN].get('hide_demo_state') != 1:
hass.states.set('a.Demo_Mode', 'Enabled')
# Setup sun
if not hass.config.latitude:
hass.config.latitude = 32.87336
if not hass.config.longitude:
hass.config.longitude = 117.22743
bootstrap.setup_component(hass, 'sun')
# Setup demo platforms
for component in COMPONENTS_WITH_DEMO_PLATFORM:
bootstrap.setup_component(
hass, component, {component: {CONF_PLATFORM: 'demo'}})
# Setup room groups
lights = sorted(hass.states.entity_ids('light'))
switches = sorted(hass.states.entity_ids('switch'))
media_players = sorted(hass.states.entity_ids('media_player'))
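    # sorted() keeps the entity order deterministic, so the fixed indexes
    # below always pick the same demo entities.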
group.setup_group(hass, 'living room', [lights[2], lights[1], switches[0],
media_players[1]])
group.setup_group(hass, 'bedroom', [lights[0], switches[1],
media_players[0]])
# Setup IP Camera
bootstrap.setup_component(
hass, 'camera',
{'camera': {
'platform': 'generic',
'name': 'IP Camera',
'still_image_url': 'http://home-assistant.io/demo/webcam.jpg',
}})
# Setup scripts
bootstrap.setup_component(
hass, 'script',
{'script': {
'demo': {
'alias': 'Toggle {}'.format(lights[0].split('.')[1]),
'sequence': [{
'execute_service': 'light.turn_off',
'service_data': {ATTR_ENTITY_ID: lights[0]}
}, {
'delay': {'seconds': 5}
}, {
'execute_service': 'light.turn_on',
'service_data': {ATTR_ENTITY_ID: lights[0]}
}, {
'delay': {'seconds': 5}
}, {
'execute_service': 'light.turn_off',
'service_data': {ATTR_ENTITY_ID: lights[0]}
}]
}}})
# Setup scenes
bootstrap.setup_component(
hass, 'scene',
{'scene': [
{'name': 'Romantic lights',
'entities': {
lights[0]: True,
lights[1]: {'state': 'on', 'xy_color': [0.33, 0.66],
'brightness': 200},
}},
{'name': 'Switch on and off',
'entities': {
switches[0]: True,
switches[1]: False,
}},
]})
# Setup fake device tracker
hass.states.set("device_tracker.paulus", "home",
{ATTR_ENTITY_PICTURE:
"http://graph.facebook.com/297400035/picture",
ATTR_FRIENDLY_NAME: 'Paulus'})
hass.states.set("device_tracker.anne_therese", "not_home",
{ATTR_FRIENDLY_NAME: 'Anne Therese',
'latitude': hass.config.latitude + 0.002,
'longitude': hass.config.longitude + 0.002})
hass.states.set("group.all_devices", "home",
{
"auto": True,
ATTR_ENTITY_ID: [
"device_tracker.paulus",
"device_tracker.anne_therese"
]
})
# Setup configurator
configurator_ids = []
def hue_configuration_callback(data):
""" Fake callback, mark config as done. """
time.sleep(2)
# First time it is called, pretend it failed.
if len(configurator_ids) == 1:
configurator.notify_errors(
configurator_ids[0],
"Failed to register, please try again.")
configurator_ids.append(0)
else:
configurator.request_done(configurator_ids[0])
request_id = configurator.request_config(
hass, "Philips Hue", hue_configuration_callback,
description=("Press the button on the bridge to register Philips Hue "
"with Home Assistant."),
description_image="/static/images/config_philips_hue.jpg",
submit_caption="I have pressed the button"
)
configurator_ids.append(request_id)
return True
|
Ircam-Web/mezzanine-organization
|
organization/job/test/tests.py
|
Python
|
agpl-3.0
| 10,376
| 0.001157
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017 Ircam
# Copyright (c) 2016-2017 Guillaume Pellerin
# Copyright (c) 2016-2017 Emilie Zawadzki
# This file is part of mezzanine-organization.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mezzanine.utils.tests import TestCase
from organization.job.models import JobOffer, Candidacy, JobResponse
# from organization.job.admin import *
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core import urlresolvers
from django.contrib.auth import get_user_model as User
class URLTests(TestCase):
def setUp(self):
super(URLTests, self).setUp()
self.job_offer = JobOffer.objects.create(
title="django dev",
email="testing@email.fr",
type="internship",
content="python"
)
self.candidacy = Candidacy.objects.create(
title="research",
text_button_external="more"
)
def test_job_offer_detail_url(self):
response = self.client.get('/job-offer/' + self.job_offer.slug + "/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "python")
self.assertTemplateUsed(response, "job/job_offer_detail.html")
def test_basic_job_offer_url(self):
response = self.client.get('/job-offer/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "django-dev")
self.assertTemplateUsed(response, "job/job_offer_list.html")
def test_basic_candidacies_url(self):
response = self.client.get('/candidacies/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "research")
self.assertTemplateUsed(response, "job/candidacy_list.html")
def test_candidacies_autocomplete(self):
response = self.client.get('/candidacy-autocomplete/')
self.assertEqual(response.status_code, 200)
class JobOfferTests(TestCase):
def setUp(self):
super(JobOfferTests, self).setUp()
app = "organization_job"
model = "joboffer"
self.url = urlresolvers.reverse("admin:%s_%s_add" % (app, model))
self.file = SimpleUploadedFile('letter.txt'.encode(), 'content'.encode())
self.job_offer = JobOffer.objects.create(
email="test@test.fr",
type="internship"
)
self.job_response = JobResponse.objects.create(
first_name="jean",
            last_name="dupont",
email="jean@dupont.fr",
message="I want this job",
curriculum_vitae=self.file,
cover_letter=self.file,
job_offer=self.job_offer
)
def test_job_offer_display_for_everyone(self):
self.client.logout()
response = self.client.get(self.job_offer.get_absolute_url())
        self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "job/job_offer_detail.html")
self.client.login(username='user', password='test')
response = self.client.get(self.job_offer.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "job/job_offer_detail.html")
self.client.login(username='test', password='test')
response = self.client.get(self.job_offer.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "job/job_offer_detail.html")
def test_job_offer_admin(self):
self.client.logout()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
self.client.login(username='user', password='test')
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
self.client.login(username='test', password='test')
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_job_offer_admin_creation(self):
self.client.login(username='test', password='test')
nmb = JobOffer.objects.count()
response = self.client.post(
self.url,
{
"title": 'title',
"status": 2,
"email": 'email@email.fr',
"type": 'internship',
'job_response-INITIAL_FORMS': '0',
'job_response-TOTAL_FORMS': '1'
}
)
self.assertEqual(response.status_code, 302)
self.assertEqual(nmb+1, JobOffer.objects.count())
def test_job_offer_admin_edition(self):
self.client.logout()
response = self.client.get(self.job_offer.get_absolute_url())
self.assertNotContains(response, "editable")
self.client.login(username='user', password='test')
response = self.client.get(self.job_offer.get_absolute_url())
self.assertNotContains(response, "editable")
self.client.login(username='test', password='test')
response = self.client.get(self.job_offer.get_absolute_url())
self.assertContains(response, "editable")
def test_job_offer_creation(self):
self.assertTrue(isinstance(self.job_offer, JobOffer))
self.assertEqual(self.job_offer.email, "test@test.fr")
self.assertEqual(self.job_offer.type, "internship")
def test_job_offer_retrieval(self):
self.assertTrue(self.job_offer in JobOffer.objects.all())
self.assertTrue(self.job_offer in JobOffer.objects.filter(email="test@test.fr"))
self.assertTrue(self.job_offer in JobOffer.objects.filter(type="internship"))
def test_job_offer_update(self):
self.job_offer.email = "test@django.fr"
self.assertEqual(1, JobOffer.objects.filter(email="test@test.fr").count())
self.assertEqual(0, JobOffer.objects.filter(email="test@django.fr").count())
self.job_offer.save()
self.assertEqual(0, JobOffer.objects.filter(email="test@test.fr").count())
self.assertEqual(1, JobOffer.objects.filter(email="test@django.fr").count())
class JobResponseTests(TestCase):
def setUp(self):
super(JobResponseTests, self).setUp()
app = "organization_job"
model = "joboffer"
self.user = User().objects.create_user(username="user", password='test')
self.file = SimpleUploadedFile('letter.txt'.encode(), 'content'.encode())
self.job_offer = JobOffer.objects.create(
email="test@test.fr",
type="internship"
)
self.job_response = JobResponse.objects.create(
first_name="jean",
last_name="dupont",
email="jean@dupont.fr",
message="I want this job",
curriculum_vitae=self.file,
cover_letter=self.file,
job_offer=self.job_offer
)
self.url = urlresolvers.reverse(
"admin:%s_%s_change" % (app, model),
args=(self.job_offer.id,)
)
def test_job_response_fk_deletion(self):
self.job_offer.delete()
self.assertTrue(
self.job_response in JobResponse.objects.filter(
job_offer__isnull=True
)
)
def test_job_response_not_display_for_everyone(self):
self.client.logout()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
self.client.login(username='user', password='test')
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
        self.client.login(username='test', password='test')
        response = self.client.get(self.url)
        self.assertEqual(response.status_code, 200)
|
magenta/note-seq
|
note_seq/constants.py
|
Python
|
apache-2.0
| 2,620
| 0
|
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Constants for music processing in Magenta."""
# Meter-related constants.
DEFAULT_QUARTERS_PER_MINUTE = 120.0
DEFAULT_STEPS_PER_BAR = 16 # 4/4 music sampled at 4 steps per quarter note.
DEFAULT_STEPS_PER_QUARTER = 4
# Default absolute quantization.
DEFAULT_STEPS_PER_SECOND = 100
# Standard pulses per quarter.
# https://en.wikipedia.org/wiki/Pulses_per_quarter_note
STANDARD_PPQ = 220
# Special melody events.
NUM_SPECIAL_MELODY_EVENTS = 2
MELODY_NOTE_OFF = -1
MELODY_NO_EVENT = -2
# Other melody-related constants.
MIN_MELODY_EVENT = -2
MAX_MELODY_EVENT = 127
MIN_MIDI_PITCH = 0 # Inclusive.
MAX_MIDI_PITCH = 127 # Inclusive.
NUM_MIDI_PITCHES = MAX_MIDI_PITCH - MIN_MIDI_PITCH + 1
NOTES_PER_OCTAVE = 12
# Velocity-related constants.
MIN_MIDI_VELOCITY = 1 # Inclusive.
MAX_MIDI_VELOCITY = 127 # Inclusive.
# Program-related constants.
MIN_MIDI_PROGRAM = 0
MAX_MIDI_PROGRAM = 127
# MIDI programs that typically sound unpitched.
UNPITCHED_PROGRAMS = (
list(range(96, 104)) + list(range(112, 120)) + list(range(120, 128)))
# Chord symbol for "no chord".
NO_CHORD = 'N.C.'
# The indices of the pitch classes in a major scale.
MAJOR_SCALE = [0, 2, 4, 5, 7, 9, 11]
# NOTE_KEYS[note] = The major keys that note belongs to.
# ex. NOTE_KEYS[0] lists all the major keys that contain the note C,
# which are:
# [0, 1, 3, 5, 7, 8, 10]
# [C, C#, D#, F, G, G#, A#]
#
# 0 = C
# 1 = C#
# 2 = D
# 3 = D#
# 4 = E
# 5 = F
# 6 = F#
# 7 = G
# 8 = G#
# 9 = A
# 10 = A#
# 11 = B
#
# NOTE_KEYS can be generated using the code below, but is explicitly declared
# for readability:
# NOTE_KEYS = [[j for j in range(12) if (i - j) % 12 in MAJOR_SCALE]
# for i in range(12)]
NOTE_KEYS = [
[0, 1, 3, 5, 7, 8, 10],
[1, 2, 4, 6, 8, 9, 11],
[0, 2, 3, 5, 7, 9, 10],
[1, 3, 4, 6, 8, 10, 11],
[0, 2, 4, 5, 7, 9, 11],
[0, 1, 3, 5, 6, 8, 10],
[1, 2, 4, 6, 7, 9, 11],
[0, 2, 3, 5, 7, 8, 10],
[1, 3, 4, 6, 8, 9, 11],
[0, 2, 4, 5, 7, 9, 10],
[1, 3, 5, 6, 8, 10, 11],
[0, 2, 4, 6, 7, 9, 11]
]
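# Illustrative self-check (_GENERATED_NOTE_KEYS is our name, not part of the
# original module): regenerating the table from MAJOR_SCALE with the
# expression quoted in the comment above should reproduce the values.
_GENERATED_NOTE_KEYS = [[j for j in range(12) if (i - j) % 12 in MAJOR_SCALE]
                        for i in range(12)]
assert _GENERATED_NOTE_KEYS == NOTE_KEYS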
|
bambooom/OMOOC2py
|
_src/om2py5w/5wex0/client.py
|
Python
|
mit
| 2,054
| 0.039435
|
# -*- coding: utf-8 -*-
# author: bambooom
'''
My Diary Web App - CLI for client
'''
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import requests
from bs4 import BeautifulSoup
import re
HELP = '''
Input h/help/? for help.
Input q/quit to quit the process.
Input s/sync to sync the diary log.
Input lt/ListTags to list all tags.
Input st:TAG to set or delete tags
Input FLUSH to clear all diary entries.
'''
url = "http://bambooomdiary.sinaapp.com/"
def get_log_all():
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
log = ''
for i in soup.find_all('pre'):
log += i.get_text()+'\n'
return log
def get_log_bytag(tags):
response = requests.get(url)
soup = BeautifulSoup(response.text,"html.parser")
ti=list(soup.find_all('i', class_='etime'))
ta=list(soup.find_all('i', class_='tags'))
di=list(soup.find_all('pre',class_='diary'))
    for i in range(len(ti)):
if ta[i].get_text() == 'TAG:'+tags:
print "%s %s" %(ti[i].get_text(),di[i].get_text())
def get_tags():
response = requests.get(url)
soup = BeautifulSoup(response.text, "html.parser")
temp =[]
for i in soup.find_all('i', class_='tags'):
temp.append(i.get_text())
tag_set = list(set(temp))
for i in tag_set:
print i
def delete_log():
res = raw_input('ARE YOU SURE?(y/n)>')
if res.lower() == 'y':
response = requests.delete(url)
print "All clear!Restart a new diary!"
else:
print "Well, keep going on!"
def write_log(message, tags):
values = {'newdiary':message,'tags':tags}
response = requests.post(url, data=values)
def client():
print HELP
tags=''
while True:
print 'TAG:'+tags
message = raw_input('Input>')
if message in ['h','help','?']:
print HELP
elif message in ['s','sync']:
            get_log_bytag(tags)
elif message in ['q','quit']:
print 'Bye~'
break
elif message in ['lt','ListTags']:
get_tags()
elif message.startswith('st:'):
tags = message[3:]
elif message == 'FLUSH':
delete_log()
else:
write_log(message,tags)
if __name__ == '__main__':
client()
|
SkyLothar/requests-aliyun
|
tests/test-utils.py
|
Python
|
apache-2.0
| 2,134
| 0
|
# -*- coding: utf8 -*-
import base64
import hashlib
import io
import nose
import requests
import aliyunauth.utils
import aliyunauth.consts
def test_cal_b64md5():
s_data = b"foo"
l_data = b"bar" * aliyunauth.consts.MD5_CHUNK_SIZE
# normal data, None
nose.tools.eq_(aliyunauth.utils.cal_b64md5(None), None)
def b64md5(data):
return base64.b64encode(hashlib.md5(data).digest()).decode("utf8")
# normal data, small size, bytes
nose.tools.eq_(aliyunauth.utils.cal_b64md5(s_data), b64md5(s_data))
# normal data, small size, bytes
nose.tools.eq_(
aliyunauth.utils.cal_b64md5(s_data.decode("utf8")), b64md5(s_data)
)
# io-like, big size, bytes
nose.tools.eq_(
aliyunauth.utils.cal_b64md5(io.BytesIO(l_data)), b64md5(l_data)
)
    # io-like, big size, str
nose.tools.eq_(
aliyunauth.utils.cal_b64md5(io.StringIO(l_data.decode("utf8"))),
b64md5(l_data)
)
def test_to_bytes():
nose.tools.ok_(isinstance(
aliyunauth.utils.to_bytes(u"foo"),
requests.compat.bytes
))
nose.tools.ok_(isinstance(
aliyunauth.utils.to_bytes(b"foo"),
requests.compat.bytes
))
nose.tools.eq_(aliyunauth.utils.to_bytes(u"福", "gb2312"), b'\xb8\xa3')
def test_to_str():
nose.tools.ok_(isinstance(
aliyunauth.utils.to_str(u"bar"),
requests.compat.str
), "unicode to str failed")
nose.tools.ok_(isinstance(
aliyunauth.utils.to_str(b"bar"),
requests.compat.str
), "bytes to str failed")
nose.tools.eq_(aliyunauth.utils.to_str(b"\xb0\xf4", "gb2312"), u"棒")
def test_percent_quote():
nose.tools.eq_(
aliyunauth.utils.percent_quote(u"福棒 &?/*~=+foo\""),
"%E7%A6%8F%E6%A3%92%20%26%3F%2F%2A~%3D%2Bfoo%22"
)
def test_percent_encode():
nose.tools.eq_(
aliyunauth.utils.percent_encode([("福 棒", "foo+bar"), ("none", None)]),
"%E7%A6%8F%20%E6%A3%92=foo%2Bbar"
)
nose.tools.eq_(
aliyunauth.utils.percent_encode([("foo", "福"), ("bar", "棒")], True),
"bar=%E6%A3%92&foo=%E7%A6%8F"
)
|
colinhiggs/pyramid-jsonapi
|
test_project/test_project/tests.py
|
Python
|
agpl-3.0
| 143,952
| 0.001765
|
from collections import namedtuple
import configparser
from functools import lru_cache
import unittest
from unittest.mock import patch, mock_open
import transaction
import testing.postgresql
import webtest
import datetime
from pyramid.config import Configurator
from pyramid.paster import get_app
from sqlalchemy import create_engine
from sqlalchemy.exc import SAWarning
import test_project
import inspect
import os
import urllib
import warnings
import json
from parameterized import parameterized
import pyramid_jsonapi.metadata
from openapi_spec_validator import validate_spec
import pprint
import ltree
from pyramid_jsonapi.permissions import (
Permission,
Targets,
)
from test_project.models import (
DBSession,
Base,
Person,
Blog,
)
from test_project import test_data
cur_dir = os.path.dirname(
os.path.abspath(
inspect.getfile(inspect.currentframe())
)
)
parent_dir = os.path.dirname(cur_dir)
RelHalf = namedtuple('RelSide', 'collection rel many filters')
FilterInfo = namedtuple('FilterInfo', 'att op value')
RelInfo = namedtuple('RelInfo', 'src tgt comment')
rel_infos = (
RelInfo(
RelHalf('people', 'blogs', False, []),
RelHalf(
'blogs', 'owner', True,
[
FilterInfo('title', 'eq', 'owned by 11'),
],
),
'One to many',
),
RelInfo(
RelHalf('blogs', 'owner', True, []),
RelHalf(
'people', 'blogs', False,
[
FilterInfo('name', 'eq', 'one thing'),
]
),
'Many to one'
),
RelInfo(
RelHalf('people', 'articles_by_assoc', True, []),
RelHalf(
'articles_by_assoc', 'authors', True,
[
FilterInfo('title', 'eq', 'Collaborative one.')
]
),
'Many to many by association table'
),
RelInfo(
RelHalf('people', 'articles_by_proxy', True, []),
RelHalf(
'articles_by_obj', None, True,
[
FilterInfo('title', 'eq', 'Collaborative by obj one.')
]
),
'Many to many by association proxy'
),
)
class MyTestApp(webtest.TestApp):
def _check_status(self, status, res):
try:
super()._check_status(status, res)
except webtest.AppError as e:
errors = res.json_body.get('errors', [{}])
raise webtest.AppError(
'%s\n%s',
errors, res.json_body.get('traceback')
)
def setUpModule():
'''Create a test DB and import data.'''
# Create a new database somewhere in /tmp
global postgresql
global engine
postgresql = testing.postgresql.Postgresql(port=7654)
engine = create_engine(postgresql.url())
ltree.add_ltree_extension(engine)
DBSession.configure(bind=engine)
def tearDownModule():
'''Throw away test DB.'''
global postgresql
DBSession.close()
postgresql.stop()
def rels_doc_func(func, i, param):
src, tgt, comment = param[0]
return '{}:{}/{} ({})'.format(func.__name__, src.collection, src.rel, comment)
def make_ri(_type, _id):
return { 'type': _type, 'id': _id }
class DBTestBase(unittest.TestCase):
_test_app = None
@classmethod
def setUpClass(cls):
cls._test_app = cls.new_test_app()
def setUp(self):
Base.metadata.create_all(engine)
# Add some basic test data.
test_data.add_to_db(engine)
transaction.begin()
def tearDown(self):
transaction.abort()
Base.metadata.drop_all(engine)
def test_app(self, options=None):
if (options is None) and self._test_app:
# If there are no options and we have a cached app, return it.
return self._test_app
return self.new_test_app(options)
@staticmethod
def new_test_app(options=None):
'''Create a test app.'''
config_path = '{}/testing.ini'.format(parent_dir)
if options:
tmp_cfg = configparser.ConfigParser()
tmp_cfg.read(config_path)
            tmp_cfg['app:main'].update(options or {})
config_path = '{}/tmp_testing.ini'.format(parent_dir)
with open(config_path, 'w') as tmp_file:
tmp_cfg.write(tmp_file)
with warnings.catch_warnings():
            # Suppress SAWarning about Property _jsonapi_id being replaced by
            # Property _jsonapi_id every time a new app is instantiated.
            warnings.simplefilter(
"ignore",
category=SAWarning
)
app = get_app(config_path)
test_app = MyTestApp(app)
test_app._pj_app = app
if options:
os.remove(config_path)
return test_app
def evaluate_filter(self, att_val, op, test_val):
if op == 'eq':
return att_val == test_val
else:
            raise Exception('Unknown filter op: {}'.format(op))
class TestTmp(DBTestBase):
'''To isolate tests so they can be run individually during development.'''
class TestPermissions(DBTestBase):
'''Test permission handling mechanisms.
'''
def test_get_alter_result_item(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
# Not allowed to see alice (people/1)
pj.view_classes[test_project.models.Person].register_permission_filter(
['read'],
['alter_result'],
lambda obj, *args, **kwargs: obj.object.name != 'alice',
)
# Shouldn't be allowed to see people/1 (alice)
test_app.get('/people/1', status=403)
# Should be able to see people/2 (bob)
test_app.get('/people/2')
def test_get_alter_result_item_individual_attributes(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
def pfilter(obj, view, mask, *args, **kwargs):
if obj.object.name == 'alice':
return view.permission_object(subtract_attributes={'age',})
else:
return True
pj.view_classes[test_project.models.Person].register_permission_filter(
['get'],
['alter_result', ],
pfilter,
)
# Alice should have attribute 'name' but not 'age'.
alice = test_app.get('/people/1').json_body['data']
self.assertIn('name', alice['attributes'])
self.assertNotIn('age', alice['attributes'])
def test_get_alter_result_item_individual_rels(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
def pfilter(obj, view, target, **kwargs):
if obj.object.name == 'alice' and target.name == 'posts':
return False
else:
return True
pj.view_classes[test_project.models.Person].register_permission_filter(
['get'],
['alter_result', ],
pfilter,
target_types=(Targets.relationship,)
)
# Alice should have relationship 'blogs' but not 'posts'.
alice = test_app.get('/people/1').json_body['data']
self.assertIn('blogs', alice['relationships'])
self.assertNotIn('posts', alice['relationships'])
def test_get_alter_result_item_rel_ids(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
# Not allowed to see blogs/1 (one of alice's 2 blogs)
pj.view_classes[test_project.models.Blog].register_permission_filter(
['get'],
['alter_result', ],
lambda obj, *args, **kwargs: obj.object.id != 1,
)
alice = test_app.get('/people/1').json_body['data']
alice_blogs = alice['relationships']['blogs']['data']
self.assertIn({'type': 'blogs', 'id': '2'}, alice_blogs)
self.assertNotIn({'type': 'blogs', 'id': '1'}, alice_blogs)
def test_get_alter_result_item_included_items(self):
test_app = self.test_app({})
pj = test_app._pj_app.pj
# Not allowed to see blogs/1 (one of alice's 2 blogs)
        pj.view_classes[test_project.models.Blog].register_permission_filter(
            ['get'],
            ['alter_result', ],
            lambda obj, *args, **kwargs: obj.object.id != 1,
        )
|
louyihua/edx-platform
|
lms/djangoapps/grades/tests/test_grades.py
|
Python
|
agpl-3.0
| 18,149
| 0.000937
|
"""
Test grade calculation.
"""
import ddt
from django.conf import settings
from django.http import Http404
from django.test import TestCase
from mock import patch, MagicMock
from nose.plugins.attrib import attr
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
from capa.tests.response_xml_factory import MultipleChoiceResponseXMLFactory
from courseware.module_render import get_module
from courseware.model_data import FieldDataCache, set_score
from courseware.tests.helpers import (
LoginEnrollmentTestCase,
get_request_for_user
)
from lms.djangoapps.course_blocks.api import get_course_blocks
from student.tests.factories import UserFactory
from student.models import CourseEnrollment
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from .. import course_grades
from ..course_grades import summary as grades_summary
from ..module_grades import get_module_score
from ..new.course_grade import CourseGrade, CourseGradeFactory
from ..new.subsection_grade import SubsectionGradeFactory
def _grade_with_errors(student, course):
"""This fake grade method will throw exceptions for student3 and
student4, but allow any other students to go through normal grading.
It's meant to simulate when something goes really wrong while trying to
grade a particular student, so we can test that we won't kill the entire
course grading run.
"""
if student.username in ['student3', 'student4']:
raise Exception("I don't like {}".format(student.username))
return grades_summary(student, course)
@attr(shard=1)
class TestGradeIteration(SharedModuleStoreTestCase):
"""
Test iteration through student gradesets.
"""
COURSE_NUM = "1000"
COURSE_NAME = "grading_test_course"
@classmethod
def setUpClass(cls):
super(TestGradeIteration, cls).setUpClass()
cls.course = CourseFactory.create(
display_name=cls.COURSE_NAME,
number=cls.COURSE_NUM
)
def setUp(self):
"""
Create a course and a handful of users to assign grades
"""
super(TestGradeIteration, self).setUp()
self.students = [
UserFactory.create(username='student1'),
UserFactory.create(username='student2'),
UserFactory.create(username='student3'),
UserFactory.create(username='student4'),
UserFactory.create(username='student5'),
]
def test_empty_student_list(self):
"""If we don't pass in any students, it should return a zero-length
iterator, but it shouldn't error."""
gradeset_results = list(course_grades.iterate_grades_for(self.course.id, []))
self.assertEqual(gradeset_results, [])
def test_nonexistent_course(self):
"""If the course we want to get grades for does not exist, a `Http404`
should be raised. This is a horrible crossing of abstraction boundaries
and should be fixed, but for now we're just testing the behavior. :-("""
with self.assertRaises(Http404):
gradeset_results = course_grades.iterate_grades_for(SlashSeparatedCourseKey("I", "dont", "exist"), [])
gradeset_results.next()
def test_all_empty_grades(self):
"""No students have grade entries"""
all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students)
self.assertEqual(len(all_errors), 0)
for gradeset in all_gradesets.values():
self.assertIsNone(gradeset['grade'])
self.assertEqual(gradeset['percent'], 0.0)
@patch('lms.djangoapps.grades.course_grades.summary', _grade_with_errors)
def test_grading_exception(self):
"""Test that we correctly capture exception messages that bubble up from
grading. Note that we only see errors at this level if the grading
process for this student fails entirely due to an unexpected event --
having errors in the problem sets will not trigger this.
We patch the grade() method with our own, which will generate the errors
for student3 and student4.
"""
        all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students)
student1, student2, student3, student4, student5 = self.students
self.assertEqual(
all_errors,
{
student3: "I don't like student3",
student4: "I don't like student4"
}
)
# But we should still have five gradesets
        self.assertEqual(len(all_gradesets), 5)
# Even though two will simply be empty
self.assertFalse(all_gradesets[student3])
self.assertFalse(all_gradesets[student4])
# The rest will have grade information in them
self.assertTrue(all_gradesets[student1])
self.assertTrue(all_gradesets[student2])
self.assertTrue(all_gradesets[student5])
################################# Helpers #################################
def _gradesets_and_errors_for(self, course_id, students):
"""Simple helper method to iterate through student grades and give us
two dictionaries -- one that has all students and their respective
gradesets, and one that has only students that could not be graded and
their respective error messages."""
students_to_gradesets = {}
students_to_errors = {}
for student, gradeset, err_msg in course_grades.iterate_grades_for(course_id, students):
students_to_gradesets[student] = gradeset
if err_msg:
students_to_errors[student] = err_msg
return students_to_gradesets, students_to_errors
class TestProgressSummary(TestCase):
"""
Test the method that calculates the score for a given block based on the
cumulative scores of its children. This test class uses a hard-coded block
hierarchy with scores as follows:
        a
        +-- b
        |   +-- d
        |   |   +-- h (2/5)
        |   |   +-- i (3/5)
        |   +-- e
        |   |   +-- j (0/1)
        |   |   +-- k (-)
        |   |   +-- l (1/3)
        |   +-- f
        |       +-- m (-)
        +-- c
            +-- g
                +-- n (3/10)
"""
# Tell Django to clean out all databases, not just default
multi_db = True
def setUp(self):
super(TestProgressSummary, self).setUp()
self.course_key = CourseLocator(
org='some_org',
course='some_course',
run='some_run'
)
self.loc_a = self.create_location('chapter', 'a')
self.loc_b = self.create_location('section', 'b')
self.loc_c = self.create_location('section', 'c')
self.loc_d = self.create_location('vertical', 'd')
self.loc_e = self.create_location('vertical', 'e')
self.loc_f = self.create_location('vertical', 'f')
self.loc_g = self.create_location('vertical', 'g')
self.loc_h = self.create_location('problem', 'h')
self.loc_i = self.create_location('problem', 'i')
self.loc_j = self.create_location('problem', 'j')
self.loc_k = self.create_location('html', 'k')
self.loc_l = self.create_location('problem', 'l')
self.loc_m = self.create_location('html', 'm')
self.loc_n = self.create_location('problem', 'n')
weighted_scores = {
self.loc_h: self.create_score(2, 5),
self.loc_i: self.create_score(3, 5),
self.loc_j: self.create_score(0, 1),
self.loc_l: self.create_score(1, 3),
self.loc_n: self.create_score(3, 10),
}
locations_to_scored_children = {
self.loc_a: [self.loc_h, self.loc_i, self.loc_j, self.loc_l, self.loc_n],
self.loc_b: [self.loc_h, self.loc_i, self.loc_j, self.loc_l],
|
jdavisp3/TigerShark
|
tigershark/parsers/M277U_4010_X070.py
|
Python
|
bsd-3-clause
| 49,720
| 0.052474
|
#
# Generated by TigerShark.tools.convertPyX12 on 2012-07-10 16:29:58.682345
#
from tigershark.X12.parse import Message, Loop, Segment, Composite, Element, Properties
parsed_277U_HEADER = Loop( u'HEADER', Properties(looptype=u'wrapper',repeat=u'1',pos=u'015',req_sit=u'R',desc=u'Table 1 - Header'),
    Segment( u'BHT', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'020',desc=u'Beginning of Hierarchical Transaction'),
Element( u'BHT01', Properties(desc=u'Hierarchical Structure Code', req_sit=u'R', data_type=(u'ID',u'4',u'4'), position=1,
codes=[u'0010'] ) ),
        Element( u'BHT02', Properties(desc=u'Transaction Set Purpose Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=2,
codes=[u'08'] ) ),
Element( u'BHT03', Properties(desc=u'Reference Identification', req_sit=u'R', data_type=(u'AN',u'1',u'50'), position=3,
codes=[] ) ),
Element( u'BHT04', Properties(desc=u'Date', req_sit=u'R', data_type=(u'DT',u'8',u'8'), position=4,
codes=[] ) ),
Element( u'BHT05', Properties(desc=u'Time', req_sit=u'N', data_type=(u'TM',u'4',u'8'), position=5,
codes=[] ) ),
Element( u'BHT06', Properties(desc=u'Transaction Type Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=6,
codes=[u'NO'] ) ),
),
)
parsed_277U_2100A = Loop( u'2100A', Properties(looptype='',repeat=u'>1',pos=u'050',req_sit=u'R',desc=u'Payer Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'050',desc=u'Payer Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'PR'] ) ),
Element( u'NM102', Properties(desc=u'Entity Type Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'2'] ) ),
Element( u'NM103', Properties(desc=u'Name Last or Organization Name', req_sit=u'R', data_type=(u'AN',u'1',u'60'), position=3,
codes=[] ) ),
Element( u'NM104', Properties(desc=u'Name First', req_sit=u'N', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'NM105', Properties(desc=u'Name Middle', req_sit=u'N', data_type=(u'AN',u'1',u'25'), position=5,
codes=[] ) ),
Element( u'NM106', Properties(desc=u'Name Prefix', req_sit=u'N', data_type=(u'AN',u'1',u'10'), position=6,
codes=[] ) ),
Element( u'NM107', Properties(desc=u'Name Suffix', req_sit=u'N', data_type=(u'AN',u'1',u'10'), position=7,
codes=[] ) ),
Element( u'NM108', Properties(desc=u'Identification Code Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=8,
codes=[u'21', u'AD', u'FI', u'NI', u'PI', u'PP', u'XV'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'R', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
Segment( u'PER', Properties(syntax='',req_sit=u'S',repeat=u'1',pos=u'080',desc=u'Payer Contact Information'),
Element( u'PER01', Properties(desc=u'Contact Function Code', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=1,
codes=[u'IC'] ) ),
Element( u'PER02', Properties(desc=u'Name', req_sit=u'S', data_type=(u'AN',u'1',u'60'), position=2,
codes=[] ) ),
Element( u'PER03', Properties(desc=u'Communication Number Qualifier', req_sit=u'R', data_type=(u'ID',u'2',u'2'), position=3,
codes=[u'ED', u'EM', u'TE'] ) ),
Element( u'PER04', Properties(desc=u'Communication Number', req_sit=u'R', data_type=(u'AN',u'1',u'256'), position=4,
codes=[] ) ),
Element( u'PER05', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=5,
codes=[u'EX'] ) ),
Element( u'PER06', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=6,
codes=[] ) ),
Element( u'PER07', Properties(desc=u'Communication Number Qualifier', req_sit=u'S', data_type=(u'ID',u'2',u'2'), position=7,
codes=[u'EX', u'FX'] ) ),
Element( u'PER08', Properties(desc=u'Communication Number', req_sit=u'S', data_type=(u'AN',u'1',u'256'), position=8,
codes=[] ) ),
Element( u'PER09', Properties(desc=u'Contact Inquiry Reference', req_sit=u'N', data_type=(u'AN',u'1',u'20'), position=9,
codes=[] ) ),
),
)
parsed_277U_2100B = Loop( u'2100B', Properties(looptype='',repeat=u'>1',pos=u'050',req_sit=u'R',desc=u'Information Receiver Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'050',desc=u'Information Receiver Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'41'] ) ),
Element( u'NM102', Properties(desc=u'Entity Type Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'1', u'2'] ) ),
Element( u'NM103', Properties(desc=u'Name Last or Organization Name', req_sit=u'R', data_type=(u'AN',u'1',u'60'), position=3,
codes=[] ) ),
Element( u'NM104', Properties(desc=u'Name First', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'NM105', Properties(desc=u'Name Middle', req_sit=u'S', data_type=(u'AN',u'1',u'25'), position=5,
codes=[] ) ),
Element( u'NM106', Properties(desc=u'Name Prefix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=6,
codes=[] ) ),
Element( u'NM107', Properties(desc=u'Name Suffix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=7,
codes=[] ) ),
Element( u'NM108', Properties(desc=u'Identification Code Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=8,
codes=[u'46', u'FI', u'XX'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'R', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
)
parsed_277U_2100C = Loop( u'2100C', Properties(looptype='',repeat=u'>1',pos=u'050',req_sit=u'R',desc=u'Provider Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'050',desc=u'Provider Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Code', req_sit=u'R', data_type=(u'ID',u'2',u'3'), position=1,
codes=[u'1P'] ) ),
Element( u'NM102', Properties(desc=u'Entity Type Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'1'), position=2,
codes=[u'1', u'2'] ) ),
Element( u'NM103', Properties(desc=u'Name Last or Organization Name', req_sit=u'R', data_type=(u'AN',u'1',u'60'), position=3,
codes=[] ) ),
Element( u'NM104', Properties(desc=u'Name First', req_sit=u'S', data_type=(u'AN',u'1',u'35'), position=4,
codes=[] ) ),
Element( u'NM105', Properties(desc=u'Name Middle', req_sit=u'S', data_type=(u'AN',u'1',u'25'), position=5,
codes=[] ) ),
Element( u'NM106', Properties(desc=u'Name Prefix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=6,
codes=[] ) ),
Element( u'NM107', Properties(desc=u'Name Suffix', req_sit=u'S', data_type=(u'AN',u'1',u'10'), position=7,
codes=[] ) ),
Element( u'NM108', Properties(desc=u'Identification Code Qualifier', req_sit=u'R', data_type=(u'ID',u'1',u'2'), position=8,
codes=[u'FI', u'SV', u'XX'] ) ),
Element( u'NM109', Properties(desc=u'Identification Code', req_sit=u'R', data_type=(u'AN',u'2',u'80'), position=9,
codes=[] ) ),
Element( u'NM110', Properties(desc=u'Entity Relationship Code', req_sit=u'N', data_type=(u'ID',u'2',u'2'), position=10,
codes=[] ) ),
Element( u'NM111', Properties(desc=u'Entity Identifier Code', req_sit=u'N', data_type=(u'ID',u'2',u'3'), position=11,
codes=[] ) ),
),
)
parsed_277U_2100D = Loop( u'2100D', Properties(looptype='',repeat=u'1',pos=u'050',req_sit=u'R',desc=u'Subscriber Name'),
Segment( u'NM1', Properties(syntax='',req_sit=u'R',repeat=u'1',pos=u'050',desc=u'Subscriber Name'),
Element( u'NM101', Properties(desc=u'Entity Identifier Cod
|
tzpBingo/github-trending
|
codespace/python/telegram/poll.py
|
Python
|
mit
| 12,268
| 0.004239
|
#!/usr/bin/env python
# pylint: disable=R0903
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2022
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains an object that represents a Telegram Poll."""
import datetime
import sys
from typing import TYPE_CHECKING, Any, Dict, List, Optional, ClassVar
from telegram import MessageEntity, TelegramObject, User, constants
from telegram.utils.helpers import from_timestamp, to_timestamp
from telegram.utils.types import JSONDict
if TYPE_CHECKING:
from telegram import Bot
class PollOption(TelegramObject):
"""
This object contains information about one answer option in a poll.
Objects of this class are comparable in terms of equality. Two objects of this class are
considered equal, if their :attr:`text` and :attr:`voter_count` are equal.
Args:
text (:obj:`str`): Option text, 1-100 characters.
voter_count (:obj:`int`): Number of users that voted for this option.
Attributes:
text (:obj:`str`): Option text, 1-100 characters.
voter_count (:obj:`int`): Number of users that voted for this option.
"""
__slots__ = ('voter_count', 'text', '_id_attrs')
def __init__(self, text: str, voter_count: int, **_kwargs: Any):
self.text = text
self.voter_count = voter_count
self._id_attrs = (self.text, self.voter_count)
MAX_LENGTH: ClassVar[int] = constants.MAX_POLL_OPTION_LENGTH
""":const:`telegram.constants.MAX_POLL_OPTION_LENGTH`"""
class PollAnswer(TelegramObject):
"""
This object represents an answer of a user in a non-anonymous poll.
Objects of this class are comparable in terms of equality. Two objects of this class are
considered equal, if their :attr:`poll_id`, :attr:`user` and :attr:`options_ids` are equal.
Attributes:
poll_id (:obj:`str`): Unique poll identifier.
user (:class:`telegram.User`): The user, who changed the answer to the poll.
option_ids (List[:obj:`int`]): Identifiers of answer options, chosen by the user.
Args:
poll_id (:obj:`str`): Unique poll identifier.
user (:class:`telegram.User`): The user, who changed the answer to the poll.
option_ids (List[:obj:`int`]): 0-based identifiers of answer options, chosen by the user.
May be empty if the user retracted their vote.
"""
__slots__ = ('option_ids', 'user', 'poll_id', '_id_attrs')
def __init__(self, poll_id: str, user: User, option_ids: List[int], **_kwargs: Any):
self.poll_id = poll_id
self.user = user
self.option_ids = option_ids
self._id_attrs = (self.poll_id, self.user, tuple(self.option_ids))
@classmethod
def de_json(cls, data: Optional[JSONDict], bot: 'Bot') -> Optional['PollAnswer']:
"""See :meth:`telegram.TelegramObject.de_json`."""
data = cls._parse_data(data)
if not data:
return None
data['user'] = User.de_json(data.get('user'), bot)
return cls(**data)
class Poll(TelegramObject):
"""
This object contains information about a poll.
Objects of this class are comparable in terms of equality. Two objects of this class are
considered equal, if their :attr:`id` is equal.
Attributes:
id (:obj:`str`): Unique poll identifier.
question (:obj:`str`): Poll question, 1-300 characters.
options (List[:class:`PollOption`]): List of poll options.
total_voter_count (:obj:`int`): Total number of users that voted in the poll.
is_closed (:obj:`bool`): :obj:`True`, if the poll is closed.
is_anonymous (:obj:`bool`): :obj:`True`, if the poll is anonymous.
type (:obj:`str`): Poll type, currently can be :attr:`REGULAR` or :attr:`QUIZ`.
allows_multiple_answers (:obj:`bool`): :obj:`True`, if the poll allows multiple answers.
correct_option_id (:obj:`int`): Optional. Identifier of the correct answer option.
explanation (:obj:`str`): Optional. Text that is shown when a user chooses an incorrect
answer or taps on the lamp icon in a quiz-style poll.
explanation_entities (List[:class:`telegram.MessageEntity`]): Optional. Special entities
like usernames, URLs, bot commands, etc. that appear in the :attr:`explanation`.
open_period (:obj:`int`): Optional. Amount of time in seconds the poll will be active
after creation.
close_date (:obj:`datetime.datetime`): Optional. Point in time when the poll will be
automatically closed.
Args:
id (:obj:`str`): Unique poll identifier.
question (:obj:`str`): Poll question, 1-300 characters.
options (List[:class:`PollOption`]): List of poll options.
is_closed (:obj:`bool`): :obj:`True`, if the poll is closed.
is_anonymous (:obj:`bool`): :obj:`True`, if the poll is anonymous.
type (:obj:`str`): Poll type, currently can be :attr:`REGULAR` or :attr:`QUIZ`.
allows_multiple_answers (:obj:`bool`): :obj:`True`, if the poll allows multiple answers.
correct_option_id (:obj:`int`, optional): 0-based identifier of the correct answer option.
Available only for polls in the quiz mode, which are closed, or was sent (not
forwarded) by the bot or to the private chat with the bot.
explanation (:obj:`str`, optional): Text that is shown when a user chooses an incorrect
answer or taps on the lamp icon in a quiz-style poll, 0-200 characters.
explanation_entities (List[:class:`telegram.MessageEntity`], optional): Special entities
like usernames, URLs, bot commands, etc. that appear in the :attr:`explanation`.
open_period (:obj:`int`, optional): Amount of time in seconds the poll will be active
after creation.
close_date (:obj:`datetime.datetime`, optional): Point in time (Unix timestamp) when the
poll will be automatically closed. Converted to :obj:`datetime.datetime`.
"""
__slots__ = (
'total_voter_count',
'allows_multiple_answers',
'open_period',
'options',
'type',
'explanation_entities',
'is_anonymous',
'close_date',
'is_closed',
'id',
'explanation',
'question',
'correct_option_id',
'_id_attrs',
)
def __init__(
self,
id: str, # pylint: disable=W0622
question: str,
options: List[PollOption],
total_voter_count: int,
is_closed: bool,
is_anonymous: bool,
type: str, # pylint: disable=W0622
allows_multiple_answers: bool,
correct_option_id: int = None,
explanation: str = None,
explanation_entities: List[MessageEntity] = None,
open_period: int = None,
close_date: datetime.datetime = None,
**_kwargs: Any,
):
self.id = id # pylint: disable=C0103
self.question = question
self.options = options
self.total_voter_count = total_voter_count
self.is_closed = is_closed
self.is_anonymous = is_anonymous
self.type = type
self.allows_multiple_answers = allows_multiple_answers
self.correct_option_id = correct_option_id
self.explanation = explanation
self.explanation_entities = explanation_entities
self.open_period = open_period
self.close_date = close_date
        self._id_attrs = (self.id,)
|
apple/coremltools
|
coremltools/proto/Gazetteer_pb2.py
|
Python
|
bsd-3-clause
| 4,337
| 0.007148
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: Gazetteer.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import DataStructures_pb2 as DataStructures__pb2
try:
FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes__pb2
except AttributeError:
FeatureTypes__pb2 = DataStructures__pb2.FeatureTypes_pb2
from .DataStructures_pb2 import *
DESCRIPTOR = _descriptor.FileDescriptor(
name='Gazetteer.proto',
package='CoreML.Specification.CoreMLModels',
syntax='proto3',
  serialized_pb=_b('\n\x0fGazetteer.proto\x12!CoreML.Specification.CoreMLModels\x1a\x14\x44\x61taStructures.proto\"\x9c\x01\n\tGazetteer\x12\x10\n\x08revision\x18\x01 \x01(\r\x12\x10\n\x08language\x18\n \x01(\t\x12\x1a\n\x12modelParameterData\x18\x64 \x01(\x0c\x12@\n\x11stringClassLabels\x18\xc8\x01 \x01(\x0b\x32\".CoreML.Specification.StringVectorH\x00\x42\r\n\x0b\x43lassLabelsB\x02H\x03P\x00\x62\x06proto3')
,
dependencies=[DataStructures__pb2.DESCRIPTOR,],
public_dependencies=[DataStructures__pb2.DESCRIPTOR,])
_GAZETTEER = _descriptor.Descriptor(
name='Gazetteer',
full_name='CoreML.Specification.CoreMLModels.Gazetteer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='revision', full_name='CoreML.Specification.CoreMLModels.Gazetteer.revision', index=0,
    number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='language', full_name='CoreML.Specification.CoreMLModels.Gazetteer.language', index=1,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='modelParameterData', full_name='CoreML.Specification.CoreMLModels.Gazetteer.modelParameterData', index=2,
number=100, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='stringClassLabels', full_name='CoreML.Specification.CoreMLModels.Gazetteer.stringClassLabels', index=3,
number=200, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='ClassLabels', full_name='CoreML.Specification.CoreMLModels.Gazetteer.ClassLabels',
index=0, containing_type=None, fields=[]),
],
serialized_start=77,
serialized_end=233,
)
_GAZETTEER.fields_by_name['stringClassLabels'].message_type = DataStructures__pb2._STRINGVECTOR
_GAZETTEER.oneofs_by_name['ClassLabels'].fields.append(
_GAZETTEER.fields_by_name['stringClassLabels'])
_GAZETTEER.fields_by_name['stringClassLabels'].containing_oneof = _GAZETTEER.oneofs_by_name['ClassLabels']
DESCRIPTOR.message_types_by_name['Gazetteer'] = _GAZETTEER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Gazetteer = _reflection.GeneratedProtocolMessageType('Gazetteer', (_message.Message,), dict(
DESCRIPTOR = _GAZETTEER,
__module__ = 'Gazetteer_pb2'
# @@protoc_insertion_point(class_scope:CoreML.Specification.CoreMLModels.Gazetteer)
))
_sym_db.RegisterMessage(Gazetteer)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003'))
# @@protoc_insertion_point(module_scope)
|
kerel-fs/skylines
|
tests/data/users.py
|
Python
|
agpl-3.0
| 1,007
| 0
|
# -*- coding: utf-8 -*-
"""Setup the SkyLines application"""
from faker import Faker
from skylines.model import User
def test_admin():
u = User()
u.first_name = u'Example'
u.last_name = u'Manager'
u.email_address = u'manager@somedomain.com'
u.password = u.original_password = u'managepass'
u.admin = True
return u
def test_user():
u1 = User()
u1.first_name = u'Example'
u1.last_name = u'User'
u1.email_address = u'example@test.de'
    u1.password = u1.original_password = u'test'
u1.tracking_key = 123456
u1.tracking_delay = 2
return u1
def test_users(n=50):
fake = Faker(locale='de_DE')
fake.seed(42)
users = []
for i in xrange(n):
u = User()
        u.first_name = fake.first_name()
u.last_name = fake.last_name()
u.email_address = fake.email()
u.password = u.original_password = fake.password()
u.tracking_key = fake.random_number(digits=6)
users.append(u)
return users
|
mobarski/sandbox
|
rsm/v9le/v5.py
|
Python
|
mit
| 8,419
| 0.049056
|
from common2 import *
# NAME IDEA -> pooling/random/sparse/distributed hebbian/horde/crowd/fragment/sample memory
# FEATURES:
# + boost -- neurons with empty mem slots learn faster
# + noise -- randomize scores to prevent snowballing
# + dropout -- temporal disabling of neurons
# + decay -- remove from mem
# + negatives -- learning to avoid detecting some patterns
# + fatigue -- winner has lower score for some time
# ~ sklearn -- compatibile api
# - prune -- if input < mem shrink mem ? (problem with m > input len)
# - weights -- sample weights for imbalanced classes
# - popularity -- most popular neuron is cloned / killed
# NEXT VERSION:
# - attention
# - https://towardsdatascience.com/the-fall-of-rnn-lstm-2d1594c74ce0
# - https://towardsdatascience.com/memory-attention-sequences-37456d271992
# - https://medium.com/breathe-publication/neural-networks-building-blocks-a5c47bcd7c8d
# - https://distill.pub/2016/augmented-rnns/
# - http://akosiorek.github.io/ml/2017/10/14/visual-attention.html
# + IDEA (sketched after these notes):
#   append activated neuron indexes to a queue available as input
#   queue ages at constant rate and drops oldest values
# - IDEA:
# each neuron has small memory of activation prior to winning
# this memory is compared to ctx and intersection added to score
# winner updates this memory
# OPTION: several memories with different time frames
# NEXT VERSION:
# - layers -- rsm stacking
# NEXT VERSIONS:
# - numpy -- faster version
# - cython -- faster version
# - gpu -- faster version
# - distributed
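# Illustrative sketch (not from the original module) of the context-queue IDEA
# above: a bounded deque keeps only the most recent activations, so the queue
# "ages" at a constant rate and the oldest values drop off automatically.
#
#   from collections import deque
#   ctx = deque(maxlen=3)          # context holds at most 3 activations
#   for winner in (5, 9, 2, 7):
#       ctx.append(winner)         # 5 falls off once the 4th winner arrives
#   # ctx is now deque([9, 2, 7], maxlen=3)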
class rsm:
def __init__(self,n,m,c=0,**kw):
"""Random Sample Memory
n -- number of neurons
m -- max connections per neuron (memory)
"""
self.mem = {j:set() for j in range(n)}
self.win = {j:0 for j in range(n)}
self.tow = {j:-42000 for j in range(n)} # time of win
self.t = 0
self.ctx = deque(maxlen=c) # context queue
# cfg
cfg = {}
cfg['n'] = n
cfg['m'] = m
cfg['c'] = c
cfg['k'] = kw.get('k',1)
cfg['method'] = kw.get('method',1)
cfg['cutoff'] = kw.get('cutoff',0.5)
cfg['decay'] = kw.get('decay',0.0)
cfg['dropout'] = kw.get('dropout',0.0)
cfg['fatigue'] = kw.get('fatigue',0)
cfg['boost'] = kw.get('boost',True)
cfg['noise'] = kw.get('noise',True)
cfg['sequence'] = kw.get('sequence',False)
cfg.update(kw)
self.cfg = cfg
# ---[ core ]---------------------------------------------------------------
def new_ctx(self):
self.ctx.clear()
# TODO -- input length vs mem length
# TODO -- args from cfg
def scores(self, input, raw=False, boost=False, noise=False, fatigue=0, dropout=0.0, **ignore): # -> dict[i] -> scores
"""
input -- sparse binary features
raw -- disable all postprocessing
boost -- improve scores based on number of unconnected synapses (TODO)
noise -- randomize scores to prevent snowballing
        dropout -- temporary disabling of neurons
"""
mem = self.mem
tow = self.tow
N = self.cfg['n']
M = self.cfg['m']
t = self.t
scores = {}
for j in mem:
scores[j] = len(set(input) & mem[j])
if raw:
return scores
if noise:
for j in mem:
scores[j] += 0.9*random()
if boost:
for j in mem:
                scores[j] += 1+2*(M-len(mem[j])) if len(mem[j])<M else 0
# TODO boost also based on low win ratio / low tow
if fatigue:
for j in mem:
dt = 1.0*min(fatigue,t - tow[j])
factor = dt / fatigue
scores[j] *= factor
if dropout:
k = int(round(float(dropout)*N))
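            # combinations() comes from common2 via the star import above and is
            # assumed here to yield k distinct neuron indexes to disable this round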
for j in combinations(N,k):
scores[j] = -1
return scores
def learn(self, input, negative=False, **ignore):
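        # process the sparse input in fixed windows of 10 features;
        # learn_() performs the actual memory update on each window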
for i in range(0,len(input),10):
self.learn_(set(input[i:i+10]),negative=negative)
def learn_(self, input, negative=False, **ignore):
"""
input -- sparse binary features
k -- number of winning neurons
"""
mem = self.mem
win = self.win
tow = self.tow
ctx = self.ctx
t = self.t
cfg = self.cfg
M = self.cfg['m']
N = self.cfg['n']
k = self.cfg['k']
decay = self.cfg['decay']
sequence = self.cfg['sequence']
known_inputs = set()
for j in mem:
known_inputs.update(mem[j])
# context
input = input | set(ctx)
# scoring
scores = self.scores(input, **cfg)
winners = top(k,scores)
for j in winners:
# negative learning
if negative:
mem[j].difference_update(input)
continue
# positive learning
unknown_inputs = input - known_inputs
mem[j].update(pick(unknown_inputs, M-len(mem[j])))
known_inputs.update(mem[j])
# handle decay
if decay:
decay_candidates = mem[j] - input
if decay_candidates:
for d in decay_candidates:
if random() < decay:
mem[j].remove(d)
# handle popularity
win[j] += 1
# handle fatigue
tow[j] = t
# handle context
if sequence:
for i in range(len(ctx)):
ctx[i] -= N
for j in winners:
ctx.append(-j-1)
self.t += 1
# ---[ auxiliary ]----------------------------------------------------------
def fit(self, X, Y):
cfg = self.cfg
for x,y in zip(X,Y):
negative = not y
self.learn(x,negative=negative,**cfg)
def fit2(self, X1, X0):
cfg = self.cfg
# TODO - unbalanced
for x1,x0 in zip(X1,X0):
self.learn(x1,negative=False,**cfg)
self.learn(x0,negative=True,**cfg)
def transform(self, X):
cutoff = self.cfg['cutoff']
out = []
for s in self.score_many(X):
y = 1 if s>=cutoff else 0
out += [y]
return out
def fit_transform(self, X, Y):
self.fit(X,Y)
return self.transform(X)
def score(self, X, Y, kind='acc'):
c = self.confusion(X,Y)
p = float(c['p'])
n = float(c['n'])
tp = float(c['tp'])
tn = float(c['tn'])
fp = float(c['fp'])
fn = float(c['fn'])
try:
if kind=='acc':
return (tp + tn) / (p + n)
elif kind=='f1':
return (2*tp) / (2*tp + fp + fn)
elif kind=='prec':
return tp / (tp + fp)
elif kind=='sens':
return tp / (tp + fn)
elif kind=='spec':
return tn / (tn + fp)
except ZeroDivisionError:
return float('nan')
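    # Worked example (illustrative, not from the original source):
    # with tp=5, tn=3, fp=1, fn=1 (so p=6, n=4):
    #   acc  = (5+3)/(6+4)   = 0.80
    #   f1   = 2*5/(2*5+1+1) ~ 0.83
    #   prec = 5/(5+1)       ~ 0.83
    #   sens = 5/(5+1)       ~ 0.83
    #   spec = 3/(3+1)       = 0.75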
def confusion(self, X, Y):
PY = self.transform(X)
p = 0
n = 0
tp = 0
tn = 0
fp = 0
fn = 0
for y,py in zip(Y,PY):
if y: p+=1
else: n+=1
if y:
if py: tp+=1
else: fn+=1
else:
if py: fp+=1
else: tn+=1
return dict(p=p,n=n,tp=tp,tn=tn,fp=fp,fn=fn)
def score_many(self, X):
out = []
for x in X:
s = self.score_one(x)
out += [s]
return out
# TODO
def calibrate(self, X, Y, kind='f1'):
for i in range(1,20):
c = 0.05*i
self.set_params(cutoff=c)
s = self.score(X,Y,kind)
            print '{} {:.3} -> {:.3}'.format(kind, c, s)
def score_one(self, input):
"aggregate scores to scalar"
k = self.cfg['k']
method = self.cfg['method']
scores = self.scores(input)
M = self.cfg['m']
if method==0:
return top(k, scores, values=True)
elif method==1:
score = 1.0*sum(top(k, scores, values=True))/(k*(M+1))
return score
elif method==2:
score = 1.0*sum(top(k, scores, values=True))/(k*M)
return min(1.0,score)
if method==3:
score = 1.0*min(top(k, scores, values=True))/(M+1)
return score
elif method==4:
score = 1.0*min(top(k, scores, values=True))/M
return min(1.0,score)
if method==5:
score = 1.0*max(top(k, scores, values=True))/(M+1)
return score
elif method==6:
score = 1.0*max(top(k, scores, values=True))/M
return min(1.0,score)
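    # Worked example (illustrative): with k=1, M=8 and a raw winner score of 6,
    # method 1 yields 6/(1*(8+1)) ~ 0.67 while method 2 yields min(1.0, 6/8) = 0.75;
    # the +1 in the (M+1) denominators keeps those variants strictly below 1.0.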
def stats(self,prefix=''):
N = self.cfg['n']
M = self.cfg['m']
mem_v = self.mem.values()
out = {}
# mem
out['mem_empty'] = sum([1.0 if len(x)==0 else 0.0 for x in mem_v])/N
out['mem_not_empty'] = sum([1.0 if len(x)>0 else 0.0 for x in mem_v])/N
out['mem_full'] = sum([1.0 if len(x)==M else 0.0 for x in mem_v])/N
out['mem_avg'] = sum([1.0*len(x) for x in mem_v])/(N*M)
# win
win = list(sorted(self.win.values()))
out['win_min'] = win[0]
out['win_max'] = win[-1]
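        # Gini coefficient of the win counts via mean absolute difference:
        # sum(|a-b|) / (2*len(win)*sum(win)); 0.0 means every neuron wins
        # equally often, values near 1.0 mean a few neurons dominate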
gini = 0
for a in win:
for b in win:
gini += abs(a-b)
gini = float(gini)/(2.0*len(win)*sum(win))
out['win_gini'] = round(gini,3)
# ctx
out['ctx_mem_sum'] = sum([1 if x<0 else 0 for m in mem_v for x in m])
out['ctx_mem_cnt'] = sum([max([1 if x<0 else 0 for x in m]) for m in mem_v if m])
        out['ctx_mem_max'] = max([sum([1 if x<0 else 0 for x in m]) for m in mem_v if m])
|
firelab/viirs_ba
|
misc_utils/arcpy_functions.py
|
Python
|
cc0-1.0
| 4,645
| 0.010549
|
# This file contains the arcpy functions that export rasters and shape files.
# These were removed from the production script because they are not used.
# I'm saving them here just in case.
# The function array2raster uses arcpy to output a raster from the VIIRS array.
# This function DOES NOT handle the pixel size properly. The output is NOT
# properly aligned in space. These rasters are for testing only.
def array2raster(array, lat, lon, OutRaster):
array = np.fliplr(np.flipud(array))
lat = np.fliplr(np.flipud(lat))
lon = np.fliplr(np.flipud(lon))
OutRaster = OutRaster + ".tif"
if os.path.exists(os.path.join(BaseDir, "tiffs", OutRaster)):
os.remove(os.path.join(BaseDir, "tiffs",OutRaster))
cellSize = 1
LLlat = float(lat[lat.shape[0]-1, 0])
LLlon = float(lon[lon.shape[0]-1, 0])
print "LLlat:", LLlat
print "LLlon:", LLlon
tempRaster = arcpy.NumPyArrayToRaster(array, arcpy.Point(LLlon, LLlat),cellSize, cellSize)
tempRaster.save(os.path.join(BaseDir, "tiffs",OutRaster))
del tempRaster
array = None
lat = None
lon = None
del array
del lat
del lon
# Output to shapefile
def out_to_shapefile(list, fileName, date):
shp_file = fileName +'.shp'
# Check for pre-existing shape, delete if necessary.
if os.path.exists(os.path.join(BaseDir, shp_file)):
arcpy.Delete_management(os.path.join(BaseDir, shp_file))
# Set up parameters and delete create shapefile.
geometry_type = "POINT"
spatial = """GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]"""
arcpy.CreateFeatureclass_management(BaseDir, shp_file, geometry_type, "", "Disabled", "Disabled", spatial)
# Add attributes
shp_file = os.path.join(BaseDir, shp_file)
arcpy.AddField_management(shp_file, "Lat", "FLOAT")
arcpy.AddField_management(shp_file, "Lon", "FLOAT")
arcpy.AddField_management(shp_file, "Date", "DATE")
# Set up cursor and loop through list adding rows.
cursor = arcpy.da.InsertCursor(shp_file, ["Lat", "Lon", "Date", "SHAPE@XY"])
for coord in list:
row = [coord[0], coord[1], date, (coord[1], coord[0])]
cursor.insertRow(row)
del cursor
# Output rasters from arrays
# The following should be uncommented if rasters are needed for testing.
##array2raster(M07ReflArray, LatArray, LonArray, "M07Refl")
##array2raster(M08ReflArray, LatArray, LonArray, "M08Refl")
##array2raster(M10ReflArray, LatArray, LonArray, "M10Refl")
##array2raster(M11ReflArray, LatArray, LonArray, "M11Refl")
##array2raster(AfArray, LatArray, LonArray, "ActiveFire")
# Output shapefile
if ShapeOut == "y":
print "Exporting to point shapefile:"
    if not os.path.exists(ShapePath):
os.makedirs(ShapePath)
shp = ShapePath + '/' + 'fire_collection_point_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
Pgsql2shpExe = os.path.join(PostBin, "pgsql2shp")
query = '\"SELECT a.*, b.fid as col_id, b.active FROM fire_events a, fire_collections b WHERE a.collection_id = b.fid;\"'
command = '\"{0}\" -f {1} -h localhost -u {2} -P {3} {4} {5}'.format(Pgsql2shpExe, shp, DBuser, pwd, DBname, query).replace('\\', '/')
print command
subprocess.call(command, shell = True)
shutil.copy2(IniFile, os.path.join(ShapePath, os.path.basename(IniFile + '_'+ datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
print "Exporting to polygon shapefile:"
if not os.path.exists(ShapePath):
os.makedirs(ShapePath)
shp = ShapePath + '/' + 'fire_collection_poly_' + datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
Pgsql2shpExe = os.path.join(PostBin, "pgsql2shp")
query = '\"SELECT ST_Multi(ST_Union(ST_Expand(geom, 375))) as geom, collection_id FROM fire_events GROUP BY collection_id;\"'
command = '\"{0}\" -f {1} -h localhost -u {2} -P {3} {4} {5}'.format(Pgsql2shpExe, shp, DBuser, pwd, DBname, query).replace('\\', '/')
print command
subprocess.call(command, shell = True)
shutil.copy2(IniFile, os.path.join(ShapePath, os.path.basename(IniFile + '_'+ datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))))
ShapeOut = ini.get("OutputFlags", "ShapeFile").lower()
ShapePath = ini.get("OutputFlags", "OutShapeDir").lower()
# lines from ini file
#ShapeFile = n ; Flag to output to shapefile using pgsql2shp
#
#OutShapeDir = c:\fiddle\VIIRS_Data\ShapeOut ; Shapefile output directory
|
sqlalchemy/sqlalchemy
|
test/orm/test_scoping.py
|
Python
|
mit
| 7,276
| 0.000412
|
from unittest.mock import Mock
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import query
from sqlalchemy.orm import relationship
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assert_warns_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class ScopedSessionTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"table1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
Table(
"table2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("someid", None, ForeignKey("table1.id")),
)
def test_basic(self):
table2, table1 = self.tables.table2, self.tables.table1
Session = scoped_session(sa.orm.sessionmaker(testing.db))
class CustomQuery(query.Query):
pass
class SomeObject(fixtures.ComparableEntity):
query = Session.query_property()
class SomeOtherObject(fixtures.ComparableEntity):
query = Session.query_property()
custom_query = Session.query_property(query_cls=CustomQuery)
self.mapper_registry.map_imperatively(
SomeObject,
table1,
properties={"options": relationship(SomeOtherObject)},
)
self.mapper_registry.map_imperatively(SomeOtherObject, table2)
s = SomeObject(id=1, data="hello")
sso = SomeOtherObject()
s.options.append(sso)
Session.add(s)
Session.commit()
Session.refresh(sso)
Session.remove()
eq_(
SomeObject(
id=1, data="hello", options=[SomeOtherObject(someid=1)]
),
Session.query(SomeObject).one(),
)
eq_(
SomeObject(
id=1, data="hello", options=[SomeOtherObject(someid=1)]
),
SomeObject.query.one(),
)
eq_(
SomeOtherObject(someid=1),
SomeOtherObject.query.filter(
SomeOtherObject.someid == sso.someid
).one(),
)
assert isinstance(SomeOtherObject.query, query.Query)
assert not isinstance(SomeOtherObject.query, CustomQuery)
assert isinstance(SomeOtherObject.custom_query, query.Query)
def test_config_errors(self):
Session = scoped_session(sa.orm.sessionmaker())
s = Session() # noqa
assert_raises_message(
sa.exc.InvalidRequestError,
"Scoped session is already present",
Session,
bind=testing.db,
)
assert_warns_message(
sa.exc.SAWarning,
"At least one scoped session is already present. ",
Session.configure,
bind=testing.db,
)
def test_call_with_kwargs(self):
mock_scope_func = Mock()
SessionMaker = sa.orm.sessionmaker()
Session = scoped_session(sa.orm.sessionmaker(), mock_scope_func)
s0 = SessionMaker()
assert s0.autoflush == True
mock_scope_func.return_value = 0
s1 = Session()
assert s1.autoflush == True
assert_raises_message(
sa.exc.InvalidRequestError,
"Scoped session is already present",
Session,
autoflush=False,
)
mock_scope_func.return_value = 1
s2 = Session(autoflush=False)
assert s2.autoflush == False
def test_methods_etc(self):
mock_session = Mock()
mock_session.bind = "the bind"
sess = scoped_session(lambda: mock_session)
sess.add("add")
sess.delete("delete")
sess.get("Cls", 5)
eq_(sess.bind, "the bind")
eq_(
mock_session.mock_calls,
[
mock.call.add("add", _warn=True),
mock.call.delete("delete"),
mock.call.get(
"Cls",
5,
options=None,
populate_existing=False,
with_for_update=None,
identity_token=None,
execution_options=None,
),
],
)
        with mock.patch(
"sqlalchemy.orm.session.object_session"
) as mock_object_session:
sess.object_session("foo")
eq_(mock_object_session.mock_calls, [mock.call("foo")])
@testing.combinations(
"style1",
"style2",
"style3",
"style4",
)
    def test_get_bind_custom_session_subclass(self, style):
"""test #6285"""
class MySession(Session):
if style == "style1":
def get_bind(self, mapper=None, **kwargs):
return super().get_bind(mapper=mapper, **kwargs)
elif style == "style2":
# this was the workaround for #6285, ensure it continues
# working as well
def get_bind(self, mapper=None, *args, **kwargs):
return super().get_bind(mapper, *args, **kwargs)
elif style == "style3":
# py2k style
def get_bind(self, mapper=None, *args, **kwargs):
return super(MySession, self).get_bind(
mapper, *args, **kwargs
)
elif style == "style4":
# py2k style
def get_bind(self, mapper=None, **kwargs):
return super(MySession, self).get_bind(
mapper=mapper, **kwargs
)
s1 = MySession(testing.db)
is_(s1.get_bind(), testing.db)
ss = scoped_session(sessionmaker(testing.db, class_=MySession))
is_(ss.get_bind(), testing.db)
def test_attributes(self):
expected = [
name
for cls in Session.mro()
for name in vars(cls)
if not name.startswith("_")
]
ignore_list = {
"connection_callable",
"transaction",
"in_transaction",
"in_nested_transaction",
"get_transaction",
"get_nested_transaction",
"prepare",
"invalidate",
"bind_mapper",
"bind_table",
"enable_relationship_loading",
"dispatch",
}
SM = scoped_session(sa.orm.sessionmaker(testing.db))
missing = [
name
for name in expected
if not hasattr(SM, name) and name not in ignore_list
]
eq_(missing, [])
|
chiahaoliu/pdf_lib
|
pdf_lib/pdf_helper.py
|
Python
|
mit
| 3,994
| 0.017526
|
# helper functions
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
from diffpy.Structure import loadStructure
from pyobjcryst.crystal import CreateCrystalFromCIF
from diffpy.srreal.bondcalculator import BondCalculator
from diffpy.srreal.pdfcalculator import PDFCalculator
from diffpy.srreal.pdfcalculator import DebyePDFCalculator
#from pdf_lib.glbl import glbl
from glbl import glbl
def read_full_gr(f_name, rmin=glbl.r_min, rmax = glbl.r_max, skip_num=glbl.skip_row):
    ''' simple function to read .gr data generated from PDFgui'''
read_in = np.loadtxt(str(f_name), skiprows = skip_num)
raw_data = np.transpose(read_in)
    raw_data_list = raw_data.tolist()
    upper_ind = raw_data_list[0].index(rmax)
    lower_ind = raw_data_list[0].index(rmin)
    cut_data_list = np.asarray([raw_data[0][lower_ind:upper_ind], raw_data[1][lower_ind:upper_ind]])
return cut_data_list
def read_gr(f_name):
''' simple function to read .gr data in database'''
read_in = np.loadtxt(str(f_name))
plt.plot(read_in[0], read_in[1])
(gr_name, tail) = os.path.splitext(f_name)
plt.title(gr_name)
plt.xlabel('r, A')
plt.ylabel('G(r), A^-2')
def simple_pdf_cal(input_f, Debye = True, DW_factor = glbl.DW_factor, qmin = glbl.q_min, qmax = glbl.q_max, rmax = glbl.r_max):
''' simple pdf calculator. Take input .cif/.xyz file to calculate PDF
    (only use PDFCalculator now, can't calculate nano structure at this stage)
    arguments:
    input_f - str - structure file name
    DW_factor - float - value of the Debye-Waller factor, which accounts for thermal motions. Default=1 means zero temperature
'''
## calculate theoretical pdf from given structure file
# create structure
struc = loadStructure(input_f)
struc.Bisoequiv = DW_factor
# calculate PDF
pdfc = PDFCalculator(qmax = qmax, rmax = rmax)
dbc = DebyePDFCalculator(qmax = qmax, rmax = rmax)
if Debye:
(r, g) = dbc(struc, qmin = qmin)
else:
(r, g) = pdfc(struc, qmin=qmin)
return (r, g)
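# Example call (illustrative, not part of the original module; assumes a local
# CsCl.cif file and an installed diffpy stack):
#   r, g = simple_pdf_cal('CsCl.cif', Debye=True)
#   plt.plot(r, g)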
def dbc_iter(struc_f, iter_range):
'''change different range'''
import numpy as np
import matplotlib.pyplot as plt
from diffpy.srreal.pdfcalculator import DebyePDFCalculator
from diffpy.Structure import loadStructure
struc = loadStructure(struc_f)
dbc = DebyePDFCalculator()
dbc.setStructure(struc)
dbc.qmax = 20
dbc.qmin = 0.5
dbc.rmax = 20
#par = eval(para)
#print(dbc.par)
for step in iter_range:
(r,g) = dbc(delta2 = step)
plt.plot(r,g)
#plt.legend('delta2 =%s' % str(step) )
def iter_bond(x_min, x_max, step=5):
import numpy as np
import matplotlib.pyplot as plt
#from matplotlib.pyplot import plot
from diffpy.Structure import Structure, Atom, Lattice
from diffpy.srreal.pdfcalculator import DebyePDFCalculator
dbc = DebyePDFCalculator()
dbc.qmax = 20
dbc.qmin = 0.5
dbc.rmax = 20
iter_range = np.linspace(x_min, x_max, step)
fig_dim = len(iter_range)
acs = Atom('Cs', [0, 0, 0])
acl = Atom('Cl', [0.5, 0.5, 0.5])
plt.figure()
for ind, val in enumerate(iter_range):
cscl = Structure(atoms=[acs, acl],
lattice=Lattice(val, val, val, 90, 90, 90))
dbc.setStructure(cscl)
(r,g) = dbc()
print(val)
plt.subplot(fig_dim, 1, ind+1)
plt.plot(r,g)
plt.title('bond length = %s' % str(val))
def single_plot(x,y):
''' simple plot, can't stand the god damn syntax anymore
'''
import matplotlib.pyplot as plt
#plt.figure()
plt.plot(x,y)
plt.hold(False)
def multi_plot(data_sets):
''' multiplot by calling single_plot'''
    if not isinstance(data_sets, tuple):
        working_data = (data_sets,)  # wrap a single data set into a tuple
    else:
        working_data = data_sets
    for data in working_data:
x_read = data[0]
y_read = data[1]
single_plot(x_read, y_read)
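# Example (illustrative): overlay two cut data sets returned by read_full_gr.
#   multi_plot((read_full_gr('a.gr'), read_full_gr('b.gr')))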
|
savoirfairelinux/account-financial-tools
|
account_chart_update/wizard/wizard_chart_update.py
|
Python
|
agpl-3.0
| 67,528
| 0.001274
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010 Zikzakmedia S.L. (http://www.zikzakmedia.com)
# Copyright (c) 2010 Pexego Sistemas Informáticos S.L. (http://www.pexego.es)
# @authors: Jordi Esteve (Zikzakmedia), Borja López Soilán (Pexego)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
from openerp.tools.translate import _
import logging
def _reopen(self, res_id, model):
return {
'type': 'ir.actions.act_window',
'view_mode': 'form',
'view_type': 'form',
'res_id': res_id,
'res_model': self._name,
'target': 'new',
# save original model in context, because selecting the list of available
# templates requires a model in context
'context': {
'default_model': model,
},
}
class WizardLog:
"""
*******************************************************************
Small helper class to store the messages and errors on the wizard.
*******************************************************************
"""
def __init__(self):
self.messages = []
self.errors = []
def add(self, message, is_error=False):
"""
Adds a message to the log.
"""
logger = logging.getLogger("account_chart_update")
if is_error:
logger.warning(u"Log line: %s" % message)
self.errors.append(message)
else:
logger.debug(u"Log line: %s" % message)
self.messages.append(message)
def has_errors(self):
"""
Returns whether errors where logged.
"""
return self.errors
def __call__(self):
return "".join(self.messages)
def __str__(self):
return "".join(self.messages)
def get_errors_str(self):
return "".join(self.errors)
class wizard_update_charts_accounts(orm.TransientModel):
_name = 'wizard.update.charts.accounts'
def _get_lang_selection_options(self, cr, uid, context={}):
"""
Gets the available languages for the selection.
"""
obj = self.pool.get('res.lang')
ids = obj.search(cr, uid, [], context=context)
res = obj.read(cr, uid, ids, ['code', 'name'], context)
return [(r['code'], r['name']) for r in res] + [('', '')]
_columns = {
'state': fields.selection([
('init', 'Step 1'),
('ready', 'Step 2'),
('done', 'Wizard completed')
], 'Status', readonly=True),
'company_id': fields.many2one(
'res.company',
'Company',
required=True,
ondelete='set null'
),
'chart_template_id': fields.many2one(
'account.chart.template',
'Chart Template',
ondelete='cascade',
required=True
),
'code_digits': fields.integer(
'# of digits',
required=True,
help="No. of digits to use for account code. "
"Make sure it is the same number as existing accounts."
),
'lang': fields.selection(
_get_lang_selection_options,
'Language',
size=5,
help="For records searched by name (taxes,
|
tax codes, fiscal positions), "
"the template name will be matched against the record name on this language."
),
'update_tax_code': fields.boolean(
'Update tax codes',
help="Existing tax codes are updated."
" Tax codes are searched by name."
),
'update_tax': fields.boolean(
'Update taxes',
help="Existing taxes are updated. Taxes are searched by name."
),
'update_account': fields.boolean(
'Update accounts',
help="Existing accounts are updated. Accounts are searched by code."
),
'update_fiscal_position': fields.boolean(
'Update fiscal positions',
help="Existing fiscal positions are updated. Fiscal positions are searched by name."
),
'update_children_accounts_parent': fields.boolean(
"Update children accounts parent",
help="Update the parent of accounts that seem (based on the code)"
" to be children of the newly created ones."
" If you had an account 430 with a child 4300000, and a 4300 "
"account is created, the 4300000 parent will be set to 4300."
),
'continue_on_errors': fields.boolean(
"Continue on errors",
help="If set, the wizard will continue to the next step even if "
"there are minor errors (for example the parent account "
"of a new account couldn't be set)."
),
'tax_code_ids': fields.one2many(
'wizard.update.charts.accounts.tax.code',
'update_chart_wizard_id',
'Tax codes',
ondelete='cascade'
),
'tax_ids': fields.one2many(
'wizard.update.charts.accounts.tax',
'update_chart_wizard_id',
'Taxes',
ondelete='cascade'
),
'account_ids': fields.one2many('wizard.update.charts.accounts.account',
'update_chart_wizard_id', 'Accounts', ondelete='cascade'),
'fiscal_position_ids': fields.one2many('wizard.update.charts.accounts.fiscal.position',
'update_chart_wizard_id', 'Fiscal positions', ondelete='cascade'),
'new_tax_codes': fields.integer('New tax codes', readonly=True),
'new_taxes': fields.integer('New taxes', readonly=True),
'new_accounts': fields.integer('New accounts', readonly=True),
'new_fps': fields.integer('New fiscal positions', readonly=True),
'updated_tax_codes': fields.integer('Updated tax codes', readonly=True),
'updated_taxes': fields.integer('Updated taxes', readonly=True),
'updated_accounts': fields.integer('Updated accounts', readonly=True),
'updated_fps': fields.integer('Updated fiscal positions', readonly=True),
'log': fields.text('Messages and Errors', readonly=True)
}
def name_search(self, cr, user, name,
args=None, operator='ilike', context=None, limit=80):
"""
Redefine the search to search by company name.
"""
if not name:
name = '%'
if not args:
args = []
if not context:
context = {}
args = args[:]
ids = []
ids = self.search(
cr, user, [('company_id', operator, name)] + args, limit=limit)
return self.name_get(cr, user, ids, context=context)
def name_get(self, cr, uid, ids, context=None):
"""
Use the company name and template as name.
"""
if context is None:
context = {}
if not len(ids):
return []
records = self.browse(cr, uid, ids, context)
res = []
for record in records:
res.append((record.id, record.company_id.name +
' - ' + record.chart_template_id.name))
return res
def _get_chart(self, cr, uid, context=None):
"""
Returns the default chart template.
|
guokr/asynx
|
asynx/asynx/__init__.py
|
Python
|
mit
| 194
| 0
|
from os import path
from .taskqueue import TaskQueueClient
__all__ = ['TaskQueueClient']
with open(path.join(path.dirname(__file__), 'version.txt')) as fp:
__version__ = fp.read().strip()
|
luofei98/qgis
|
python/plugins/processing/gui/ProcessingToolbox.py
|
Python
|
gpl-2.0
| 16,100
| 0.000683
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
ProcessingToolbox.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.utils import iface
from processing.modeler.ModelerUtils import ModelerUtils
from processing.core.Processing import Processing
from processing.core.ProcessingLog import ProcessingLog
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.gui.MissingDependencyDialog import MissingDependencyDialog
from processing.gui.AlgorithmClassification import AlgorithmDecorator
from processing.gui.ParametersDialog import ParametersDialog
from processing.gui.BatchProcessingDialog import BatchProcessingDialog
from processing.gui.EditRenderingStylesDialog import EditRenderingStylesDialog
from processing.ui.ui_ProcessingToolbox import Ui_ProcessingToolbox
class ProcessingToolbox(QDockWidget, Ui_ProcessingToolbox):
USE_CATEGORIES = '/Processing/UseSimplifiedInterface'
def __init__(self):
QDockWidget.__init__(self, None)
self.setupUi(self)
self.setAllowedAreas(Qt.LeftDockWidgetArea | Qt.RightDockWidgetArea)
self.modeComboBox.clear()
self.modeComboBox.addItems(['Simplified interface',
'Advanced interface'])
settings = QSettings()
if not settings.contains(self.USE_CATEGORIES):
settings.setValue(self.USE_CATEGORIES, True)
useCategories = settings.value(self.USE_CATEGORIES, type=bool)
if useCategories:
self.modeComboBox.setCurrentIndex(0)
else:
self.modeComboBox.setCurrentIndex(1)
self.modeComboBox.currentIndexChanged.connect(self.modeHasChanged)
self.searchBox.textChanged.connect(self.textChanged)
self.algorithmTree.customContextMenuRequested.connect(
self.showPopupMenu)
self.algorithmTree.doubleClicked.connect(self.executeAlgorithm)
if hasattr(self.searchBox, 'setPlaceholderText'):
self.searchBox.setPlaceholderText(self.tr('Search...'))
self.fillTree()
def textChanged(self):
text = self.searchBox.text().strip(' ').lower()
self._filterItem(self.algorithmTree.invisibleRootItem(), text)
if text:
self.algorithmTree.expandAll()
else:
self.algorithmTree.collapseAll()
self.algorithmTree.invisibleRootItem().child(0).setExpanded(True)
def _filterItem(self, item, text):
if (item.childCount() > 0):
show = False
for i in xrange(item.childCount()):
child = item.child(i)
showChild = self._filterItem(child, text)
show = showChild or show
item.setHidden(not show)
return show
elif isinstance(item, (TreeAlgorithmItem, TreeActionItem)):
hide = bool(text) and (text not in item.text(0).lower())
item.setHidden(hide)
return not hide
else:
item.setHidden(True)
return False
def modeHasChanged(self):
idx = self.modeComboBox.currentIndex()
settings = QSettings()
if idx == 0:
# Simplified
settings.setValue(self.USE_CATEGORIES, True)
else:
settings.setValue(self.USE_CATEGORIES, False)
self.fillTree()
def algsListHasChanged(self):
self.fillTree()
def updateProvider(self, providerName, updateAlgsList = True):
if updateAlgsList:
Processing.updateAlgsList()
for i in xrange(self.algorithmTree.invisibleRootItem().childCount()):
child = self.algorithmTree.invisibleRootItem().child(i)
if isinstance(child, TreeProviderItem):
if child.providerName == providerName:
child.refresh()
# sort categories and items in categories
child.sortChildren(0, Qt.AscendingOrder)
for i in xrange(child.childCount()):
child.child(i).sortChildren(0, Qt.AscendingOrder)
break
def showPopupMenu(self, point):
item = self.algorithmTree.itemAt(point)
if isinstance(item, TreeAlgorithmItem):
alg = item.alg
popupmenu = QMenu()
executeAction = QAction(self.tr('Execute'), self.algorithmTree)
executeAction.triggered.connect(self.executeAlgorithm)
popupmenu.addAction(executeAction)
if alg.canRunInBatchMode and not alg.allowOnlyOpenedLayers:
executeBatchAction = QAction(
self.tr('Execute as batch process'),
self.algorithmTree)
executeBatchAction.triggered.connect(
self.executeAlgorithmAsBatchProcess)
popupmenu.addAction(executeBatchAction)
popupmenu.addSeparator()
editRenderingStylesAction = QAction(
self.tr('Edit rendering styles for outputs'),
self.algorithmTree)
editRenderingStylesAction.triggered.connect(
self.editRenderingStyles)
popupmenu.addAction(editRenderingStylesAction)
actions = Processing.contextMenuActions
if len(actions) > 0:
popupmenu.addSeparator()
for action in actions:
action.setData(alg, self)
if action.isEnabled():
contextMenuAction = QAction(action.name,
self.algorithmTree)
contextMenuAction.triggered.connect(action.execute)
popupmenu.addAction(contextMenuAction)
popupmenu.exec_(self.algorithmTree.mapToGlobal(point))
def editRenderingStyles(self):
item = self.algorithmTree.currentItem()
if isinstance(item, TreeAlgorithmItem):
alg = Processing.getAlgorithm(item.alg.commandLineName())
dlg = EditRenderingStylesDialog(alg)
dlg.exec_()
def executeAlgorithmAsBatchProcess(self):
item = self.algorithmTree.currentItem()
if isinstance(item, TreeAlgorithmItem):
alg = Processing.getAlgorithm(item.alg.commandLineName())
dlg = BatchProcessingDialog(alg)
dlg.exec_()
def executeAlgorithm(self):
item = self.algorithmTree.currentItem()
if isinstance(item, TreeAlgorithmItem):
            alg = Processing.getAlgorithm(item.alg.commandLineName())
message = alg.checkBeforeOpeningParametersDialog()
if message:
dlg = MissingDependencyDialog(message)
dlg.exec_()
return
alg = alg.getCopy()
dlg = alg.getCustomParametersDialog()
if not dlg:
dlg = ParametersDialog(alg)
canvas = iface.mapCanvas()
prevMapTool = canvas.mapTool()
dlg.show()
dlg.exec_()
if canvas.mapTool() != prevMapTool:
try:
|
mcjlnrtwcz/Rekoder
|
rekoder.py
|
Python
|
mit
| 272
| 0
|
""" Execute this file to launch Rekoder. Refer to README.md for usage. """
# Rekoder modules
from core.app import App
if __name__ == '__main__':
# Load configuration file and start application.
app = App()
app.load_json_config('config.json')
    app.start()
|
riquito/fs-radar
|
tests/test_path_filter.py
|
Python
|
gpl-3.0
| 6,014
| 0
|
import pytest
import unittest
from fs_radar.path_filter import makePathFilter, makeDirFilter
class MakePathFilterTest(unittest.TestCase):
def test_empty_rules(self):
f = makePathFilter([])
assert f('') is False
assert f('foo.txt') is False
def test_file_at_any_depth(self):
f = makePathFilter([
'foo.txt'
])
assert f('foo.txt')
assert f('./foo.txt')
assert f('a/b/foo.txt')
def test_asterisk_for_file_at_any_depth(self):
f = makePathFilter([
'*.txt'
])
assert f('foo.txt')
assert f('./foo.txt')
assert f('a/b/foo.txt')
def test_file_pattern_must_not_used_as_prefix(self):
f = makePathFilter([
'.doc'
])
assert f('foo.docx') is False
def test_match_only_at_relative_root(self):
f = makePathFilter([
'./foo.txt'
])
assert f('./foo.txt')
assert f('foo.txt')
assert f('/foo.txt') is False
assert f('a/b/foo.txt') is False
def test_match_only_absolute_path(self):
f = makePathFilter([
'/a/b/foo.txt'
])
assert f('/a/b/foo.txt')
assert f('a/b/foo.txt') is False
def test_match_directory_and_any_file_underneath(self):
f = makePathFilter([
'a/b/'
])
assert f('a/b/')
assert f('a/b')
assert f('a/b/foo.txt')
assert f('a/b/c/')
assert f('a/b/c/bar')
def test_do_not_use_directory_as_prefix(self):
f = makePathFilter([
'a/b/'
])
assert f('a/bo') is False
def test_just_asterisk(self):
f = makePathFilter([
'*'
])
assert f('') is False
assert f('foo.txt')
assert f('a/b/')
def test_start_with_asterisk(self):
        f = makePathFilter([
'*a',
'*b/foo'
])
assert f('a')
assert f('xyza')
assert f('b') is False
assert f('b/foo')
assert f('xb/foo')
def test_single_asterisk(self):
f = makePathFilter([
'a/*foo/a',
'b/bar*/b',
'c/*baz*/c',
])
assert f('a/foo/a')
assert f('a/xfoo/a')
assert f('b/bar/b')
assert f('b/barx/b')
assert f('c/baz/c')
assert f('c/xbaz/c')
assert f('c/bazx/c')
assert f('c/xbazx/c')
assert f('abcdfoo/a') is False
def test_just_multi_asterisks(self):
f = makePathFilter([
'**'
])
assert f('') is False
assert f('foo.txt')
assert f('a/b/')
def test_file_start_multi_asterisks(self):
f = makePathFilter([
'**a'
])
assert f('foo.txt') is False
assert f('ba')
assert f('bar') is False
assert f('ba/example/') is False
assert f('x/y/a')
def test_dir_start_multi_asterisks(self):
f = makePathFilter([
'**a/'
])
assert f('ba')
assert f('bar') is False
assert f('ba/example/')
assert f('x/y/a/')
def test_multi_asterisks(self):
f = makePathFilter([
'a/**/x'
])
assert f('a/x') is False
assert f('a/one-level/x')
assert f('a/multi/level/deep/x')
assert f('a/b/c') is False
def test_exclusion(self):
f = makePathFilter([
"app/cache/*",
"!app/cache/*.txt",
"+app/cache/do-not-exclude-me.txt"
])
assert f('app/cache/include-me')
assert f('app/cache/exclude-me.txt') is False
assert f('app/cache/do-not-exclude-me.txt')
def test_working_directory_just_dot(self):
f = makePathFilter([
'.'
])
assert f('.')
assert f('./')
assert f('foo.txt')
assert f('./foo.txt')
assert f('a/b/')
def test_working_directory_dot_slash(self):
f = makePathFilter([
'././'
])
assert f('.')
assert f('./')
assert f('foo.txt')
assert f('./foo.txt')
assert f('a/b/')
class MakeDirFilterTest(unittest.TestCase):
def test_empty_rules(self):
f = makeDirFilter([])
assert f('') is False
assert f('a/') is False
def test_dir_at_any_depth(self):
f = makeDirFilter([
'a/'
])
assert f('a/')
assert f('./a')
assert f('b/a/')
def test_ignore_file_keep_dir(self):
f = makeDirFilter([
'a/foo.txt'
])
assert f('a/')
assert f('./a')
assert f('b/a/')
def test_excluded_dir(self):
f = makeDirFilter([
'a/',
'!a/b/',
'!a/c/foo.txt',
'!a/d/',
'+a/d/baz.txt'
])
assert f('a/')
assert f('a/b/') is False
assert f('a/c/')
assert f('a/d/')
def test_working_directory_just_dot(self):
f = makeDirFilter([
'.'
])
assert f('.')
assert f('./')
assert f('foo.txt')
assert f('./foo.txt')
assert f('a/b/')
def test_working_directory_dot_slash(self):
f = makeDirFilter([
'././'
])
assert f('.')
assert f('./')
assert f('foo.txt')
assert f('./foo.txt')
assert f('a/b/')
def test_single_asterisk_in_a_path_starting_with_dot(self):
f = makeDirFilter([
'./a/*/c/'
])
assert f('./a/b/c')
assert f('./a/b/d') is False
assert f('a/b/c')
assert f('a/b/d') is False
def test_double_asterisk_in_a_path_starting_with_dot(self):
f = makeDirFilter([
'./a/**/c/'
])
assert f('./a/b/c')
assert f('./a/b1/b2/c')
assert f('./a/b/d') is False
assert f('a/b/c')
|
javierhuerta/unach-photo-server
|
tests/urls.py
|
Python
|
mit
| 296
| 0.003378
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.conf.urls import url, include
from unach_photo_server.urls import urlpatterns as unach_photo_server_urls
urlpatterns = [
    url(r'^', include(unach_photo_server_urls, namespace='unach_photo_server')),
]
|
xdevelsistemas/taiga-back-community
|
taiga/projects/attachments/__init__.py
|
Python
|
agpl-3.0
| 1,012
| 0
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
default_app_config = "taiga.projects.attachments.apps.AttachmentsAppConfig"
|
cea-hpc/shine
|
lib/Shine/Commands/Install.py
|
Python
|
gpl-2.0
| 4,458
| 0.001795
|
# Install.py -- File system installation commands
# Copyright (C) 2007-2013 CEA
#
# This file is part of shine
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to
|
the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
from __future__ import print_function
import sys
from Shine.Configuration.Globals import Globals
from Shine.FSUtils import create_lustrefs
from Shine.Lustre.FileSystem import FSRemoteError
from Shine.Commands.Base.Command import Command, CommandHelpException
from Shine.Commands.Base.CommandRCDefs import RC_OK, RC_FAILURE
# Lustre events
from Shine.Commands.Base.FSEventHandler import FSGlobalEventHandler
class Install(Command):
"""
shine install -m /path/to/model.lmf
"""
NAME = "install"
DESCRIPTION = "Install a new file system."
def execute(self):
# Option sanity check
self.forbidden(self.options.fsnames, "-f, see -m")
self.forbidden(self.options.labels, "-l")
self.forbidden(self.options.indexes, "-i")
self.forbidden(self.options.failover, "-F")
rc = RC_OK
if not self.options.model:
raise CommandHelpException("Lustre model file path"
"(-m <model_file>) argument required.", self)
eh = FSGlobalEventHandler(self)
# Use this Shine.FSUtils convenience function.
lmf = self.get_lmf_path()
if lmf:
print("Using Lustre model file %s" % lmf)
else:
raise CommandHelpException("Lustre model file for ``%s'' not found:"
" please use filename or full LMF path.\n"
"Your default model files directory (lmf_dir) is: %s" %
(self.options.model, Globals().get_lmf_dir()), self)
install_nodes = self.options.nodes
excluded_nodes = self.options.excludes
fs_conf, fs = create_lustrefs(self.get_lmf_path(),
event_handler=eh, nodes=install_nodes,
excluded=excluded_nodes)
# Register the filesystem in backend
print("Registering FS %s to backend..." % fs.fs_name)
if self.options.dryrun:
rc = 0
else:
rc = self.register_fs(fs_conf)
if rc:
msg = "Error: failed to register FS to backend (rc=%d)" % rc
print(msg, file=sys.stderr)
else:
print("Filesystem %s registered." % fs.fs_name)
# Helper message.
# If user specified nodes which were not used, warn him about it.
actual_nodes = fs.components.managed().servers()
if not self.check_valid_list(fs_conf.get_fs_name(), \
actual_nodes, "install"):
return RC_FAILURE
# Install file system configuration files; normally, this should
# not be done by the Shine.Lustre.FileSystem object itself, but as
# all proxy methods are currently handled by it, it is more
# convenient this way...
try:
fs.install(fs_conf.get_cfg_filename(),
dryrun=self.options.dryrun)
tuning_conf = Globals().get_tuning_file()
if tuning_conf:
fs.install(tuning_conf, dryrun=self.options.dryrun)
except FSRemoteError as error:
print("WARNING: Due to error, installation skipped on %s"
% error.nodes)
rc = RC_FAILURE
if not install_nodes and not excluded_nodes:
# Give pointer to next user step.
print("Use `shine format -f %s' to initialize the file system." %
fs_conf.get_fs_name())
return rc
def register_fs(self, fs_conf):
# register file system configuration to the backend
fs_conf.register_fs()
fs_conf.register_targets()
|
lfcnassif/MultiContentViewer
|
release/modules/ext/libreoffice/program/python-core-3.3.0/lib/email/iterators.py
|
Python
|
lgpl-3.0
| 2,205
| 0.002268
|
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Various types of useful iterators and generators."""
__all__ = [
'body_line_iterator',
'typed_subpart_iterator',
'walk',
# Do not include _structure() since it's part of the debugging API.
]
import sys
from io import StringIO
# This function will become a method of the Message class
def walk(self):
"""Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.
"""
yield self
if self.is_multipart():
for subpart in self.get_payload():
for subsubpart in subpart.walk():
yield subsubpart
# These two functions are imported into the Iterators.py interface module.
def body_line_iterator(msg, decode=False):
"""Iterate over the parts, returning string payloads line-by-line.
Optional decode (default False) is passed through to .get_payload().
"""
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if isinstance(payload, str):
for line in StringIO(payload):
yield line
def typed_subpart_iterator(msg, maintype='text', subtype=None):
"""Iterate over the subparts with a given MIME type.
Use `maintype' as the main MIME type to match against; this defaults to
"text". Optional `subtype' is the MIME subtype to match against; if
omitted, only the main type is matched.
"""
for subpart in msg.walk():
if subpart.get_content_maintype() == maintype:
if subtype is None or subpart.get_content_subtype() == subtype:
yield subpart
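# Example (illustrative): pull every text/plain payload out of a parsed message.
#   from email import message_from_string
#   msg = message_from_string(raw_mail)      # raw_mail is any RFC 2822 text
#   for part in typed_subpart_iterator(msg, 'text', 'plain'):
#       print(part.get_payload())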
def _structure(msg, fp=None, level=0, include_default=False):
"""A handy debugging aid"""
if fp is None:
fp = sys.stdout
tab = ' ' * (level * 4)
print(tab + msg.get_content_type(), end='', file=fp)
if include_default:
print(' [%s]' % msg.get_default_type(), file=fp)
else:
print(file=fp)
if msg.is_multipart():
for subpart in msg.get_payload():
_structure(subpart, fp, level+1, include_default)
|
aouerfelli/SuperDD-Python
|
res/Room.py
|
Python
|
mit
| 8,839
| 0.006788
|
from pygame import *
from res import createImage, playSound
class Room():
def __init__(self, levelSize, spritePath):
self.level = Surface(levelSize)
self.levelRect = self.level.get_rect()
self.levelUnlocked = False
self.levelCleared = False
self.spike = createImage("%sspike.png"%(spritePath))
self.spikes = []
self.walls = []
self.blocks = []
self.platforms = []
self.pillars = []
self.dynamicObjects = {
"key": {
"sprite": createImage("%skey.png"%(spritePath)),
"rect": None # Don't need this, keeping it for format
}
}
# Position helper function
def checkEdgePos(self, pos, sprite):
pos = list(pos)
if pos[0] == -1: pos[0] = self.levelRect.width - sprite.get_width()
if pos[1] == -1: pos[1] = self.levelRect.height - sprite.get_height()
return pos
# Level Decor ---
def setBackdrop(self):
for x in range(0, self.levelRect.width, self.tile.get_width()):
for y in range(0, self.levelRect.height, self.tile.get_height()):
self.level.blit(self.tile, (x, y))
def setStartDoor(self):
doorRect = self.startDoor.get_rect()
# Ew, hardcoded hero values
doorRect.centerx = self.initHeroPos[0] + 20
doorRect.bottom = self.initHeroPos[1] + 46
self.level.blit(self.startDoor, doorRect)
def addWallTop(self, pos, length):
wWidth, wHeight = self.wallBlocks["top"].get_size()
self.walls.append(Rect(
pos[0] - wWidth,
pos[1] - wHeight,
wWidth * length,
wHeight
))
for top in range(pos[0], wWidth * length, wWidth):
self.levelBorder.blit(self.wallBlocks["top"], (top, pos[1]))
def addWallRight(self, pos, length):
wWidth, wHeight = self.wallBlocks["right"].get_size()
self.walls.append(Rect(
pos[0] - wWidth,
pos[1] - wHeight,
wWidth,
wHeight * length
))
for right in range(pos[1], wHeight * length, wHeight):
self.levelBorder.blit(self.wallBlocks["right"], (pos[0], right))
def addWallBottom(self, pos, length):
wWidth, wHeight = self.wallBlocks["bottom"].get_size()
self.walls.append(Rect(
pos[0] - wWidth,
pos[1] - wHeight,
wWidth * length,
wHeight
))
for bottom in range(pos[0], wWidth * length, wWidth):
self.levelBorder.blit(self.wallBlocks["bottom"], (bottom, pos[1]))
def addWallLeft(self, pos, length):
wWidth, wHeight = self.wallBlocks["left"].get_size()
self.walls.append(Rect(
pos[0] - wWidth,
pos[1] - wHeight,
wWidth,
wHeight * length
))
for left in range(pos[1], wHeight * length, wHeight):
self.levelBorder.blit(self.wallBlocks["left"], (pos[0], left))
def addWallBoxTop(self, pos, size):
bBlockW, bBlockH = self.wallBlocks["right"].get_width(), self.wallBlocks["bottom"].get_height()
size = (size[0] * bBlockW, size[1] * bBlockH)
pos = list(pos)
if pos[0] == -1: pos[0] = self.levelBorder.get_rect().right - size[0]
self.walls.append(Rect((pos[0] - bBlockW, pos[1] - bBlockH), size))
wallBoxRect = Rect(pos, size)
self.levelBorder.set_clip(wallBoxRect)
bg = Surface(size)
bg.fill(self.bgBColor)
self.levelBorder.blit(bg, wallBoxRect)
for bottom in range(pos[0], pos[0] + size[0], self.wallBlocks["bottom"].get_width()):
self.levelBorder.blit(self.wallBlocks["bottom"], (bottom, wallBoxRect.bottom - bBlockH))
for right in range(pos[1], pos[1] + size[1], self.wallBlocks["right"].get_height()):
self.levelBorder.blit(self.wallBlocks["right"], (wallBoxRect.left, right))
for left in range(pos[1], pos[1] + size[1], self.wallBlocks["left"].get_height()):
self.levelBorder.blit(self.wallBlocks["left"], (wallBoxRect.right - bBlockW, left))
self.levelBorder.set_clip(None)
def addWallBoxBottom(self, pos, size):
bBlockW, bBlockH = self.wallBlocks["left"].get_width(), self.wallBlocks["top"].get_height()
size = (size[0] * bBlockW, size[1] * bBlockH)
pos = list(pos)
if pos[0] == -1: pos[0] = self.levelBorder.get_rect().right - size[0]
if pos[1] == -1: pos[1] = self.levelBorder.get_rect().bottom - size[1]
self.walls.append(Rect((pos[0] - bBlockW, pos[1] - bBlockH), size))
wallBoxRect = Rect(pos, size)
self.levelBorder.set_clip(wallBoxRect)
bg = Surface(size)
bg.fill(self.bgBColor)
self.levelBorder.blit(bg, wallBoxRect)
for top in range(pos[0], pos[0] + size[0], self.wallBlocks["top"].get_width()):
self.levelBorder.blit(self.wallBlocks["top"], (top, wallBoxRect.top))
for left in range(pos[1], pos[1] + size[1], self.wallBlocks["left"].get_height()):
self.levelBorder.blit(self.wallBlocks["left"], (wallBoxRect.right - bBlockW, left))
for right in range(pos[1], pos[1] + size[1], self.wallBlocks["right"].get_height()):
self.levelBorder.blit(self.wallBlocks["right"], (wallBoxRect.left, right))
self.levelBorder.set_clip(None)
# Level objects
def setKey(self, pos):
self.dynamicObjects["key"]["rect"] = Rect(pos, self.dynamicObjects["key"]["sprite"].get_size())
def unlockLevel(self):
self.levelUnlocked = True
playSound("./assets/sounds/key_sound.ogg")
del self.dynamicObjects["key"]
self.door.openDoor()
self.door.updateDoor(self)
def addBlock(self, typeOf, pos):
sprite = self.blockTypes[typeOf]
pos = self.checkEdgePos(pos, sprite)
self.blocks.append(Rect(pos, sprite.get_size()))
self.level.blit(sprite, pos)
def addPlatform(self, typeOf, pos):
sprite = self.platformTypes[typeOf]
pos = self.checkEdgePos(pos, sprite)
self.platforms.append(Rect(pos, sprite.get_size()))
self.level.blit(sprite, pos)
def addPillar(self, typeOf, pos):
sprite = self.pillarTypes[typeOf]
pos = self.checkEdgePos(pos, sprite)
if not typeOf == "background": self.pillars.append(Rect(pos, sprite.get_size()))
self.level.blit(sprite, pos)
def addSpike(self, pos):
sprite = self.spike
pos = self.checkEdgePos(pos, sprite)
self.spikes.append(Rect(pos, sprite.get_size()))
self.level.blit(sprite, pos)
class RoomRegular(Room):
def __init__(self, spritePath, levelSize):
super().__init__(levelSize, spritePath)
self.bgFColor = 0x22373A
self.bgBColor = 0x1B2821
self.tile = createImage("%sRegular/backdrop.png"%(spritePath))
self.startDoor = createImage("%sRegular/door_start.png"%(spritePath))
self.wallBlocks = {
"top": createImage("%sRegular/border_top.png"%(spritePath)),
"right": createImage("%sRegular/border_right.png"%(spritePath)),
"bot
|
tom": createImage("%sRegular/border_bottom.png"%(spritePath)),
"left": createImage("%sRegular/border_left.png"%(spritePath))
}
self.blockTypes = {
"long": createImage("%sRegular/long_block.png"%(spritePath)),
"short": createImage("%sRegular/short_block.png"%(spritePath)),
"mini": createImage("%sRegular/mini_block.png"%(spritePath))
}
self.platformTypes = {
"long": createImage("%sRegular/long_platform.png"%(spritePath)),
"short": createImage("%sRegular/short_platform.png"%(spritePath)),
"mini": createImage("%sRegular/mini_platform.png"%(spritePath))
}
s
|
tgquintela/pySpatialTools
|
pySpatialTools/utils/util_classes/__init__.py
|
Python
|
mit
| 266
| 0.007519
|
"""
Util classes
------------
Classes which represent data types useful for the package pySpatialTools.
"""
## Spatial elements collectors
from spatialelements import SpatialElementsCollection, Locations
## Membership relations
from Membership import Membership
|
magnatronus/titanium-sac
|
titanium-sac.py
|
Python
|
mit
| 4,652
| 0.025795
|
#
# ti-sac.py is a Titanium plug-in for Sublime Text 3
#
# developed by Steve Rogers, SpiralArm Consulting Ltd (www.spiralarm.uk)
# @sarmcon
#
#
import sublime, sublime_plugin, os
import Titanium.lib.tiutils as Ti
# This will create a new Alloy Widget
class sacAlloyWidgetCommand(sublime_plugin.WindowCommand):
def run(self,paths=[]):
if len(paths) > 0 and self.window.active_view():
self.projectDir = paths[0]
self.window.active_view().window().show_input_panel("Widget name:", '',self.createWidget, None, None)
def createWidget(self, name):
Ti.createAlloyWidget(self.projectDir + "/app", name)
# This will create a new Alloy Controller
class sacAlloyControllerCommand(sublime_plugin.WindowCommand):
def run(self,paths=[]):
if len(paths) > 0 and self.window.active_view():
self.projectDir = paths[0]
self.window.active_view().window().show_input_panel("Controller name:", '',self.createController, None, None)
def createController(self, name):
Ti.createAlloyController(self.projectDir + "/app", name)
# This will clean the current project
class sacCleanProjectCommand(sublime_plugin.WindowCommand):
def run(self,paths=[]):
if len(paths) > 0:
Ti.cleanProject(paths[0])
# This will create a Titanium Alloy Project
class sacCreateAlloyCommand(sublime_plugin.TextCommand):
def run(self, edit):
# ask for the App name
self.view.window().show_input_panel("Project name:", 'test',self.createProjectFiles, None, None)
#self.createProjectFiles("alloytest") #- test version that just creates the specified project
def createProjectFiles(self, projectName):
# Turn the console on
sublime.active_window().run_command("show_panel", {"panel": "console", "toggle": True})
# Define the project meta data and the proposed directory
self.projectDir = Ti.getProjectDirectory(projectName)
Ti.consolePrint("","\n--------------------------------------------------------------------------------------------------")
Ti.consolePrint("info", "Creating Project: %s in %s................." % (projectName,self.projectDir))
# If dir exists then DONT create project
if os.path.exists(self.projectDir):
sublime.error_message("Unable to create Titanium project, the directory already exists: %s " % self.projectDir)
else:
# Step 1 - First Step Create Titanium skeleton project
Ti.createClassicProject(projectName)
# Step 2 - Now Generate the Alloy Bit
Ti.generateAlloyProject(self.projectDir)
# Step 3 - Now Create the sublime Project files
Ti.consolePrint('info', "Generating Sublime Project....")
Ti.createSublimeProject(self.projectDir)
# Step 4 Finally open the project (opens in a new sublime instance)
os.system("open %s" % self.projectDir+".sublime-project")
# Step 4a - possible add the project to the recent project list so it can be opened with Open Recent or Quick Project Switch
#TODO::
# This will create a Titanium Classic Project
class sacCreateCommand(sublime_plugin.TextCommand):
def run(self, edit):
# ask for the App name
self.view.window().show_input_panel("Project name:", 'test',self.createProjectFiles, None, None)
#self.createProjectFiles("test") #- test version that just creates the specified project
def createProjectFiles(self, projectName):
# Turn the console on
sublime.active_window().run_command("show_panel", {"panel": "console", "toggle": True})
# Define the project meta data and the proposed directory
self.projectDir = Ti.getProjectDirectory(projectName)
Ti.consolePrint("","\n--------------------------------------------------------------------------------------------------")
Ti.consolePrint("info", "Creating Project: %s in %s................." % (projectName,self.projectDir))
        # If dir exists then DONT create project
if os.path.exists(self.projectDir):
sublime.error_message("Unable to create Titanium project, the directory already exists: %s " % self.projectDir)
else:
# Step 1 - Create Titanium skeleton project
Ti.createClassicProject(projectName)
# Step 2 - Now Create the sublime Project files
Ti.consolePrint('info', "Generating Sublime Project....")
Ti.createSublimeProject(self.projectDir)
# Step 3 Finally open the project (opens in a new sublime instance)
os.system("open %s" % self.projectDir+".sublime-project")
# Step 3a - possible add the project to the recent project list so it can be opened with Open Recent or Quick Project Switch
#TODO::
# This will open the Plugin - config file
class sacEditConfigCommand(sublime_plugin.TextCommand):
def run(self,edit):
open_file_settings('titanium-sac.sublime-settings')
|
lablup/sorna-client
|
src/ai/backend/client/cli/admin/resource_policies.py
|
Python
|
lgpl-3.0
| 8,377
| 0.000716
|
import sys
import click
from tabulate import tabulate
from . import admin
from ...session import Session
from ..pretty import print_error, print_fail
@admin.command()
@click.option('-n', '--name', type=str, default=None,
help='Name of the resource policy.')
def resource_policy(name):
"""
    Show details about a keypair resource policy. When the `name` option is omitted, the
resource policy for the current access_key will be returned.
"""
fields = [
('Name', 'name'),
('Created At', 'created_at'),
('Default for Unspecified', 'default_for_unspecified'),
('Total Resource Slot', 'total_resource_slots'),
('Max Concurrent Sessions', 'max_concurrent_sessions'),
('Max Containers per Session', 'max_containers_per_session'),
('Max vFolder Count', 'max_vfolder_count'),
('Max vFolder Size', 'max_vfolder_size'),
        ('Idle Timeout', 'idle_timeout'),
('Allowed vFolder Hosts', 'allowed_vfolder_hosts'),
]
with Session() as session:
try:
rp = session.ResourcePolicy(session.config.access_key)
info = rp.info(name, fields=(item[1] for item in fields))
except Exception as e:
print_error(e)
sys.exit(1)
rows = []
if info is None:
print('No such resource policy.')
sys.exit(1)
for name, key in fields:
rows.append((name, info[key]))
print(tabulate(rows, headers=('Field', 'Value')))
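# A hypothetical invocation of the command above (the exact CLI entry-point
# name is an assumption, not taken from this file):
#   $ backend.ai admin resource-policy -n my-policy
# This prints the two-column Field/Value table built by tabulate() above.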
@admin.group(invoke_without_command=True)
@click.pass_context
def resource_policies(ctx):
'''
List and manage resource policies.
(admin privilege required)
'''
if ctx.invoked_subcommand is not None:
return
fields = [
('Name', 'name'),
('Created At', 'created_at'),
('Default for Unspecified', 'default_for_unspecified'),
('Total Resource Slot', 'total_resource_slots'),
('Max Concurrent Sessions', 'max_concurrent_sessions'),
('Max Containers per Session', 'max_containers_per_session'),
('Max vFolder Count', 'max_vfolder_count'),
('Max vFolder Size', 'max_vfolder_size'),
        ('Idle Timeout', 'idle_timeout'),
('Allowed vFolder Hosts', 'allowed_vfolder_hosts'),
]
with Session() as session:
try:
items = session.ResourcePolicy.list(fields=(item[1] for item in fields))
except Exception as e:
print_error(e)
sys.exit(1)
if len(items) == 0:
print('There are no keypair resource policies.')
return
print(tabulate((item.values() for item in items),
headers=(item[0] for item in fields)))
@resource_policies.command()
@click.argument('name', type=str, default=None, metavar='NAME')
@click.option('--default-for-unspecified', type=str, default='UNLIMITED',
help='Default behavior for unspecified resources: '
'LIMITED, UNLIMITED')
@click.option('--total-resource-slots', type=str, default='{}',
help='Set total resource slots.')
@click.option('--max-concurrent-sessions', type=int, default=30,
help='Number of maximum concurrent sessions.')
@click.option('--max-containers-per-session', type=int, default=1,
help='Number of maximum containers per session.')
@click.option('--max-vfolder-count', type=int, default=10,
              help='Number of maximum virtual folders allowed.')
@click.option('--max-vfolder-size', type=int, default=0,
help='Maximum virtual folder size (future plan).')
@click.option('--idle-timeout', type=int, default=1800,
help='The maximum period of time allowed for kernels to wait '
'further requests.')
# @click.option('--allowed-vfolder-hosts', type=click.Tuple(str), default=['local'],
# help='Locations to create virtual folders.')
@click.option('--allowed-vfolder-hosts', default=['local'],
help='Locations to create virtual folders.')
def add(name, default_for_unspecified, total_resource_slots, max_concurrent_sessions,
max_containers_per_session, max_vfolder_count, max_vfolder_size,
idle_timeout, allowed_vfolder_hosts):
'''
Add a new keypair resource policy.
NAME: NAME of a new keypair resource policy.
'''
with Session() as session:
try:
data = session.ResourcePolicy.create(
name,
default_for_unspecified=default_for_unspecified,
total_resource_slots=total_resource_slots,
max_concurrent_sessions=max_concurrent_sessions,
max_containers_per_session=max_containers_per_session,
max_vfolder_count=max_vfolder_count,
max_vfolder_size=max_vfolder_size,
idle_timeout=idle_timeout,
allowed_vfolder_hosts=allowed_vfolder_hosts,
)
except Exception as e:
print_error(e)
sys.exit(1)
if not data['ok']:
print_fail('KeyPair Resource Policy creation has failed: {0}'
.format(data['msg']))
sys.exit(1)
item = data['resource_policy']
print('Keypair resource policy ' + item['name'] + ' is created.')
@resource_policies.command()
@click.argument('name', type=str, default=None, metavar='NAME')
@click.option('--default-for-unspecified', type=str,
help='Default behavior for unspecified resources: '
'LIMITED, UNLIMITED')
@click.option('--total-resource-slots', type=str,
help='Set total resource slots.')
@click.option('--max-concurrent-sessions', type=int,
help='Number of maximum concurrent sessions.')
@click.option('--max-containers-per-session', type=int,
help='Number of maximum containers per session.')
@click.option('--max-vfolder-count', type=int,
help='Number of maximum virtual folders allowed.')
@click.option('--max-vfolder-size', type=int,
help='Maximum virtual folder size (future plan).')
@click.option('--idle-timeout', type=int,
help='The maximum period of time allowed for kernels to wait '
'further requests.')
@click.option('--allowed-vfolder-hosts', help='Locations to create virtual folders.')
def update(name, default_for_unspecified, total_resource_slots,
max_concurrent_sessions, max_containers_per_session, max_vfolder_count,
max_vfolder_size, idle_timeout, allowed_vfolder_hosts):
"""
Update an existing keypair resource policy.
NAME: NAME of a keypair resource policy to update.
"""
with Session() as session:
try:
data = session.ResourcePolicy.update(
name,
default_for_unspecified=default_for_unspecified,
total_resource_slots=total_resource_slots,
max_concurrent_sessions=max_concurrent_sessions,
max_containers_per_session=max_containers_per_session,
max_vfolder_count=max_vfolder_count,
max_vfolder_size=max_vfolder_size,
idle_timeout=idle_timeout,
allowed_vfolder_hosts=allowed_vfolder_hosts,
)
except Exception as e:
print_error(e)
sys.exit(1)
if not data['ok']:
            print_fail('KeyPair Resource Policy update has failed: {0}'
.format(data['msg']))
sys.exit(1)
print('Update succeeded.')
@resource_policies.command()
@click.argument('name', type=str, default=None, metavar='NAME')
def delete(name):
"""
Delete a keypair resource policy.
NAME: NAME of a keypair resource policy to delete.
"""
with Session() as session:
if input('Are you sure? (y/n): ').lower().strip()[:1] != 'y':
print('Canceled.')
sys.exit(1)
try:
data = session.ResourcePolicy.delete(name)
except Exception as e:
print_error(e)
sys.exit(1)
if not data['ok']:
|
boedy1996/SPARC
|
geonode/geoserver/helpers.py
|
Python
|
gpl-3.0
| 54,697
| 0.000896
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import json
import sys
import os
import urllib
import logging
import re
import time
import errno
import uuid
import datetime
from bs4 import BeautifulSoup
import geoserver
import httplib2
from urlparse import urlparse
from urlparse import urlsplit
from threading import local
from collections import namedtuple
from itertools import cycle, izip
from lxml import etree
import xml.etree.ElementTree as ET
from decimal import Decimal
from owslib.wcs import WebCoverageService
from owslib.util import http_post
from django.core.exceptions import ImproperlyConfigured
from django.contrib.contenttypes.models import ContentType
from django.db.models.signals import pre_delete
from django.template.loader import render_to_string
from django.conf import settings
from dialogos.models import Comment
from agon_ratings.models import OverallRating
from gsimporter import Client
from owslib.wms import WebMapService
from geoserver.store import CoverageStore, DataStore
from geoserver.workspace import Workspace
from geoserver.catalog import Catalog
from geoserver.catalog import FailedRequestError, UploadError
from geoserver.catalog import ConflictingDataError
from geoserver.resource import FeatureType, Coverage
from geoserver.support import DimensionInfo
from geonode import GeoNodeException
from geonode.layers.utils import layer_type, get_files
from geonode.layers.models import Layer, Attribute, Style
from geonode.layers.enumerations import LAYER_ATTRIBUTE_NUMERIC_DATA_TYPES
logger = logging.getLogger(__name__)
if not hasattr(settings, 'OGC_SERVER'):
msg = (
'Please configure OGC_SERVER when enabling geonode.geoserver.'
' More info can be found at '
'http://docs.geonode.org/en/master/reference/developers/settings.html#ogc-server')
raise ImproperlyConfigured(msg)
def check_geoserver_is_up():
"""Verifies all geoserver is running,
this is needed to be able to upload.
"""
url = "%sweb/" % ogc_server_settings.LOCATION
resp, content = http_client.request(url, "GET")
msg = ('Cannot connect to the GeoServer at %s\nPlease make sure you '
'have started it.' % ogc_server_settings.LOCATION)
assert resp['status'] == '200', msg
def _add_sld_boilerplate(symbolizer):
"""
Wrap an XML snippet representing a single symbolizer in the appropriate
elements to make it a valid SLD which applies that symbolizer to all features,
including format strings to allow interpolating a "name" variable in.
"""
return """
<StyledLayerDescriptor version="1.0.0" xmlns="http://www.opengis.net/sld" xmlns:ogc="http://www.opengis.net/ogc"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.opengis.net/sld http://schemas.opengis.net/sld/1.0.0/StyledLayerDescriptor.xsd">
<NamedLayer>
<Name>%(name)s</Name>
<UserStyle>
<Name>%(name)s</Name>
<Title>%(name)s</Title>
<FeatureTypeStyle>
<Rule>
""" + symbolizer + """
</Rule>
</FeatureTypeStyle>
</UserStyle>
</NamedLayer>
</StyledLayerDescriptor>
"""
_raster_template = """
<RasterSymbolizer>
<Opacity>1.0</Opacity>
</RasterSymbolizer>
"""
_polygon_template = """
<PolygonSymbolizer>
<Fill>
<CssParameter name="fill">%(bg)s</CssParameter>
</Fill>
<Stroke>
<CssParameter name="stroke">%(fg)s</CssParameter>
<CssParameter name="stroke-width">0.7</CssParameter>
</Stroke>
</PolygonSymbolizer>
"""
_line_template = """
<LineSymbolizer>
<Stroke>
<CssParameter name="stroke">%(bg)s</CssParameter>
<CssParameter name="stroke-width">3</CssParameter>
</Stroke>
</LineSymbolizer>
</Rule>
</FeatureTypeStyle>
<FeatureTypeStyle>
<Rule>
<LineSymbolizer>
<Stroke>
<CssParameter name="stroke">%(fg)s</CssParameter>
</Stroke>
</LineSymbolizer>
"""
_point_template = """
<PointSymbolizer>
<Graphic>
<Mark>
<WellKnownName>%(mark)s</WellKnownName>
<Fill>
<CssParameter name="fill">%(bg)s</CssParameter>
</Fill>
<Stroke>
<CssParameter name="stroke">%(fg)s</CssParameter>
</Stroke>
</Mark>
<Size>10</Size>
</Graphic>
</PointSymbolizer>
"""
_style_templates = dict(
raster=_add_sld_boilerplate(_raster_template),
polygon=_add_sld_boilerplate(_polygon_template),
line=_add_sld_boilerplate(_line_template),
point=_add_sld_boilerplate(_point_template)
)
def _style_name(resource):
return _punc.sub("_", resource.store.workspace.name + ":" + resource.name)
def get_sld_for(layer):
# FIXME: GeoServer sometimes fails to associate a style with the data, so
    # for now we default to using a point style. (It works for lines and
    # polygons; hope this doesn't happen for rasters, though.)
name = layer.default_style.name if layer.default_style is not None else "point"
# FIXME: When gsconfig.py exposes the default geometry type for vector
# layers we should use that rather than guessing based on the auto-detected
# style.
if name in _style_templates:
fg, bg, mark = _style_contexts.next()
return _style_templates[name] % dict(
name=layer.name,
fg=fg,
bg=bg,
mark=mark)
else:
return None
def fixup_style(cat, resource, style):
logger.debug("Creating styles for layers associated with [%s]", resource)
layers = cat.get_layers(resource=resource)
logger.info("Found %d layers associated with [%s]", len(layers), resource)
for lyr in layers:
if lyr.default_style.name in _style_templates:
logger.info("%s uses a default style, generating a new one", lyr)
name = _style_name(resource)
if style is None:
sld = get_sld_for(lyr)
else:
sld = style.read()
logger.info("Creating style [%s]", name)
style = cat.create_style(name, sld)
lyr.default_style = cat.get_style(name)
logger.info("Saving changes to %s", lyr)
cat.save(lyr)
logger.info("Successfully updated %s", lyr)
def cascading_delete(cat, layer_name):
resource = None
try:
if layer_name.find(':') != -1:
workspace, name = layer_name.split(':')
ws = cat.get_workspace(workspace)
if ws is None:
logger.debug(
'cascading delete was called on a layer where the workspace was not found')
return
resource = cat.get_resource(name, workspace=workspace)
else:
resource = cat.get_resource(layer_name)
except EnvironmentError as e:
if e.errno == errno.ECONNREFUSED:
            msg = ('Could not connect to geoserver at "%s" '
'to save information for layer "%s"' % (
ogc_server_settings.LOCATION, layer_name)
)
logger.warn(msg, e)
return None
else:
raise e
if resource is None:
# If there is no associated resource,
# this method can not delete anything.
# Let's return and make a note in the log.
        logger.debug(
            'cascading_delete was called with a non-existent resource')
return
resource_name = resource.name
ly
|
jmcnamara/XlsxWriter
|
xlsxwriter/test/comparison/test_chart_format23.py
|
Python
|
bsd-2-clause
| 1,541
| 0
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
    Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
        self.set_filename('chart_format23.xlsx')
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [108321024, 108328448]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
'border': {'color': 'yellow'},
'fill': {'color': 'red', 'transparency': 100},
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
|
roshantha9/AbstractManycoreSim
|
src/libApplicationModel/SimpleBinaryTree.py
|
Python
|
gpl-3.0
| 2,226
| 0.014376
|
# simple binary tree
# in this implementation, a node is inserted between an existing node and the root
class BinaryTree():
def __init__(self,rootid):
self.left = None
self.right = None
self.rootid = rootid
def getLeftChild(self):
return self.left
def getRightChild(self):
return self.right
def setNodeValue(self,value):
self.rootid = value
def getNodeValue(self):
return self.rootid
def insertRight(self,newNode):
if self.right == None:
self.right = BinaryTree(newNode)
else:
tree = BinaryTree(newNode)
tree.right = self.right
self.right = tree
def insertLeft(self,newNode):
if self.left == None:
self.left = BinaryTree(newNode)
else:
            tree = BinaryTree(newNode)
            tree.left = self.left
            self.left = tree
# try to insert to left, if not insert to right
def insert(self, newNode, max_depth, current_depth=0):
if self.left == None:
self.left = BinaryTree(newNode)
else:
if(current_depth < max_depth):
current_depth+=1
self.left.insert(newNode, max_depth, current_depth)
else:
if(self.right == None):
self.right = BinaryTree(newNode)
else:
'''
def insert(item, tree):
                if (item < tree.entry):
if (tree.left != None):
insert(item, tree.left)
else:
tree.left = Tree(item)
else:
if (tree.right != None):
insert(item, tree.right)
else:
tree.right = Tree(item)
'''
def printTree(tree):
if tree != None:
printTree(tree.getLeftChild())
print(tree.getNodeValue())
printTree(tree.getRightChild())
# test tree
def testTree():
myTree = BinaryTree("Maud")
myTree.insertLeft("Bob")
myTree.insertRight("Tony")
myTree.insertRight("Steven")
printTree(myTree)
testTree()
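# Added check (a sketch, not part of the original file): insertLeft splices
# the new node between this node and the previous left child, matching the
# comment at the top of the file.
spliceTree = BinaryTree("root")
spliceTree.insertLeft("a")
spliceTree.insertLeft("b")
assert spliceTree.getLeftChild().getNodeValue() == "b"
assert spliceTree.getLeftChild().getLeftChild().getNodeValue() == "a"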
|
mattliston/examples
|
example009.py
|
Python
|
mit
| 5,626
| 0.013686
|
# wget http://stuff.mit.edu/afs/sipb/contrib/pi/pi-billion.txt
# THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python example009.py
from __future__ import division
import numpy as np
import theano
import theano.tensor as T
import lasagne as L
import argparse
import time
from six.moves import cPickle
np.set_printoptions(threshold='nan')
np.set_printoptions(linewidth=200)
np.set_printoptions(formatter={'float': '{:12.8f}'.format, 'int': '{:4d}'.format})
print 'numpy ' + np.__version__
print 'theano ' + theano.__version__
print 'lasagne ' + L.__version__
# parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--ndigits', help='number of digits, default 1000000', default=1000000, type=int)
parser.add_argument('--window', help='window size, default=100', default=100, type=int)
parser.add_argument('--lr', help='learning rate, default 0.001', default=0.001, type=float)
parser.add_argument('--nepoch', help='number of epochs, default=100', default=100, type=int)
parser.add_argument('--nbatch', help='number of batches per epoch, default=100', default=100, type=int)
parser.add_argument('--batchsize', help='batch size, default 1000', default=1000, type=int)
parser.add_argument('--test', help='test fraction, default 0.2', default=0.2, type=float)
parser.add_argument('--model', help='output model filename')
args = parser.parse_args()
print args
# load data
with open('pi-billion.txt') as f:
s = f.read()
f.close()
pi = np.empty([args.ndigits],dtype='float32')
i=0
for c in s:
if c.isdigit():
pi[i] = float(c)
i+=1
if i==args.ndigits:
break
print 'pi.shape',pi.shape
input_var = T.matrix(dtype=theano.config.floatX)
target_var = T.vector(dtype='int32')
network = L.layers.InputLayer((None, args.window), input_var)
print 'input', L.layers.get_output_shape(network)
network = L.layers.ReshapeLayer(network, ((-1, 1, args.window)))
print 'reshape', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
network = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(network)
conv = L.layers.Conv1DLayer(network,num_filters=256,filter_size=11,stride=2)
print 'conv', L.layers.get_output_shape(conv)
gap = L.layers.Pool1DLayer(conv, pool_size=L.layers.get_output_shape(conv)[2], stride=None, pad=0, mode='average_inc_pad')
print 'gap', L.layers.get_output_shape(gap)
network = L.layers.DenseLayer(gap, 2, nonlinearity=L.nonlinearities.softmax)
print 'output', L.layers.get_output_shape(network)
#input_var = T.matrix(dtype=theano.config.floatX)
#target_var = T.vector(dtype='int32')
#network = L.layers.InputLayer((None, args.window), input_var)
#network = L.layers.DenseLayer(network, 10000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 1000)
#network = L.layers.DenseLayer(network, 100)
#network = L.layers.DenseLayer(network, 2, nonlinearity=L.nonlinearities.softmax)
prediction = L.layers.get_output(network)
loss = L.objectives.aggregate(L.objectives.categorical_crossentropy(prediction, target_var), mode='mean')
params = L.layers.get_all_params(network, trainable=True)
updates = L.updates.adam(loss, params, learning_rate=args.lr)
scaled_grads,norm = L.updates.total_norm_constraint(T.grad(loss,params), np.inf, return_norm=True)
train_fn = theano.function([input_var, target_var], [loss,norm], updates=updates)
test_fn = theano.function([input_var], L.layers.get_output(network, deterministic=True))
d = np.empty([args.batchsize,args.window],dtype='float32')
l = np.empty([args.batchsize],dtype='int32')
t0 = time.time()
t = time.time()
for i in range(args.nepoch):
tloss=0
tnorm=0
#train
for j in range(args.nbatch):
for k in range(args.batchsize):
#w = np.random.randint(int(pi.shape[0]*args.test),pi.shape[0]-args.window)
w = np.random.randint(0,int(pi.shape[0]*(1-args.test))-args.window)
d[k] = pi[w:w+args.window]
if np.random.randint(0,2)==0:
l[k]=0
else:
np.random.shuffle(d[k])
l[k]=1
bloss,bnorm = train_fn(d,l)
tloss += bloss
tnorm += bnorm
#test
for k in range(args.batchsize):
#w = np.random.randint(0,int(pi.shape[0]*args.test-args.window))
w = np.random.randint(int(pi.shape[0]*(1-args.test)),pi.shape[0]-args.window)
d[k] = pi[w:w+args.window]
if np.random.randint(0,2)==0:
l[k]=0
else:
np.random.shuffle(d[k])
l[k]=1
val_output = test_fn(d)
val_predictions = np.argmax(val_output, axis=1)
tacc = np.mean(val_predictions == l)
print 'epoch {:8d} loss {:12.8f} grad {:12.8f} accuracy {:12.8f} n_zero {:6d} n_one {:6d} t_epoch {:4d} t_total {:8d}'.format(i, tloss/args.nbatch, tnorm/args.nbatch, tacc, np.sum(val_predictions==0), np.sum(val_predictions==1), int(time.time()-t), int(time.time()-t0))
t = time.time()
f = open(args.model, 'wb')
cPickle.dump(L.layers.get_all_param_values(network), f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
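# Note on the setup above: each example is a window of digits of pi, labeled
# 0 when left in order and 1 when shuffled in place, so a test accuracy near
# 0.5 would mean the network cannot tell pi's digits from a random
# permutation of themselves.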
|
jasonge27/picasso
|
python-package/doc/source/conf.py
|
Python
|
gpl-3.0
| 5,024
| 0.000796
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pycasso documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 24 01:54:19 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import shutil
sys.path.insert(0, os.path.abspath('../../'))
# copy README
shutil.copy('../../README.rst', './README.rst')
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['APItemplates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pycasso'
copyright = '2017, Haoming Jiang, Jason Ge'
author = 'Haoming Jiang, Jason Ge'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
VERSION_PATH = os.path.join(os.path.dirname(__file__), '../../pycasso/VERSION')
# The full version, including alpha/beta/rc tags.
release = open(VERSION_PATH).read().strip()
# The short X.Y version.
version = release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pycassodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
    # 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pycasso.tex', 'pycasso Documentation',
'Haoming Jiang, Jian Ge', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pycasso', 'pycasso Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pycasso', 'pycasso Documentation',
author, 'pycasso', 'One line description of project.',
'Miscellaneous'),
]
|
nis-sdn/odenos
|
apps/cli/producer.py
|
Python
|
apache-2.0
| 16,458
| 0.006744
|
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
import random
class Topology(object):
"""
Topology source: ring, mesh, linear and fat_tree
[source] -- send() --> [coroutine] -- send() --> [coroutine(sink)]
args:
node
port
eport
link
slice
slice_condition <priority_policy>
federation
priority_policy:
minmax 0 or 65535
random 0 ~ 65535
fixed 10
Note: in case of fat_tree topo, this generates topo with fixed parameters:
the number of pods: 40
the number of ports: 40
the number of ToR SW: 40 * pods
the number of aggregation SW: 2 * pods
the number of core SW: 4
"""
eports = 3
nodes = 10
# fat_tree-related
EPORT = 0
NULL = 0
CORE = 1
AGGREGATION = 2
TOR = 3
LEFT = 1
RIGHT = 2
def __init__(self, *args, **kwargs):
"""
kwrags:
networks The number of networks
topo_type Topology type
nodes The number of nodes
eports The number of unconnected (external) nodes
"""
if 'networks' in kwargs:
self.networks = kwargs['networks']
else:
self.networks = 1
self.topo_type = kwargs['topo_type']
if 'nodes' in kwargs:
self.nodes = kwargs['nodes']
else:
self.nodes = Topology.nodes
if 'eports' in kwargs:
self.eports = kwargs['eports']
else:
self.eports = Topology.eports
if self.topo_type == 'fat_tree':
"""
layer: core(0), aggregation(1), tor(2)
pod: 1 ~ 40
"""
self.formatstr = '{layer:}{pod:02}{left_right}{number:02}'
else:
self.formatstr = '{:0'+str(len(str(self.nodes+self.eports))+1)+'}'
# Coroutine setting
def __call__(self, cr_next=None):
self.cr_next = cr_next
return self
def close(self):
self.cr_next.close()
# Coroutine send imitation
# TODO: this method should be coroutine's send()
def send(self, data):
cr_next = self.cr_next
args = []
kwargs = {}
for s in data:
if isinstance(s, dict):
k = s.keys()[0]
v = s.values()[0]
kwargs[k] = v
else:
args.append(s)
gen_type = args[0]
if gen_type == 'node':
return self._generate_node(cr_next)
elif gen_type == 'port':
return self._generate_port(cr_next)
elif gen_type == 'eport': # External port
            return self._generate_eport(cr_next)
elif gen_type == 'link':
return self._generate_link(cr_next)
elif gen_type == 'slice':
return self._generate_slice(cr_next)
elif gen_type == 'slice_condition':
if len(args) == 2:
return self._generate_slice_condition(cr_next, args[1])
else:
raise Exception('Requires slice_policy')
elif gen_type == 'federation':
            if len(args) == 3:
return self._generate_federation(cr_next, args[1], args[2])
else:
raise Exception('Requires boundary_node and boundary_port')
def _generate_node(self, cr_next):
formatstr = self.formatstr
if self.topo_type == 'fat_tree':
CORE = Topology.CORE
AGGR = Topology.AGGREGATION
TOR = Topology.TOR
LEFT = Topology.LEFT
RIGHT = Topology.RIGHT
np = formatstr.format
NULL = Topology.NULL
for i in range(1, self.networks+1):
# Core
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=LEFT, number=2)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=RIGHT, number=1)])
cr_next.send([i, np(layer=CORE, pod=NULL, left_right=RIGHT, number=2)])
# Aggregation
for pod in range(1,41):
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=LEFT, number=1)])
cr_next.send([i, np(layer=AGGR, pod=pod, left_right=RIGHT, number=1)])
# ToR
for tor in range(1,21):
cr_next.send([i, np(layer=TOR, pod=pod, left_right=LEFT, number=tor)])
cr_next.send([i, np(layer=TOR, pod=pod, left_right=RIGHT, number=tor)])
else:
for i in range(1, self.networks+1):
for j in range(1, self.nodes+1):
cr_next.send([i, formatstr.format(j)])
def _generate_port(self, cr_next):
networks = self.networks
nodes = self.nodes
formatstr = self.formatstr
topo = self.topo_type
if topo == 'ring':
"""
...[node]--adj_left--[node]--adj_right--[node]...
"""
for i in range(1, networks + 1):
for j in range(1, nodes+1):
node = formatstr.format(j)
if j == 1:
adj_left = formatstr.format(nodes)
adj_right = formatstr.format(2)
elif j == nodes:
adj_left = formatstr.format(nodes - 1)
adj_right = formatstr.format(1)
else:
adj_left = formatstr.format(j-1)
adj_right = formatstr.format(j+1)
cr_next.send([i, node, adj_left])
cr_next.send([i, node, adj_right])
elif topo == 'mesh':
"""
| |
...[node]----[node]----[node]...
1 : range(1,1), range(2,1001)
2 : range(1,2), range(3,1001)
3 : range(1,3), range(4,1001)
:
1000: range(1,1000), range(1001,1001)
"""
for i in range(1, networks+1):
for j in range(1, nodes+1):
node = formatstr.format(j)
for port in range(1,j):
cr_next.send([i, node, formatstr.format(port)])
for port in range(j+1,nodes+1):
cr_next.send([i, node, formatstr.format(port)])
elif topo == 'linear':
"""
[node]---[node]...[node]---[node]
"""
for i in range(1, networks+1):
for j in range(1, nodes+1):
node = formatstr.format(j)
if j == 1:
adj_right = formatstr.format(2)
cr_next.send([i, node, adj_right])
elif j == nodes:
adj_left = formatstr.format(nodes - 1)
cr_next.send([i, node, adj_left])
else:
adj_left = formatstr.format(j-1)
adj_right = formatstr.form
|
secret-transaction/RipOff9Gag
|
server/ripoff9gag/api.py
|
Python
|
apache-2.0
| 225
| 0.004444
|
import endpoints
from api_user import UserAPI
from api_posts import PostsAPI
from api_comments import ReactionAPI
from api_image import ImageAPI
APPLICATION = endpoints.api_server([PostsAPI, ReactionAPI, UserAPI, ImageAPI])
|
Jobava/zamboni
|
mkt/commonplace/views.py
|
Python
|
bsd-3-clause
| 6,014
| 0.000166
|
import json
import os
from urlparse import urlparse
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import resolve
from django.http import Http404
from django.shortcuts import render
from django.utils import translation
from django.views.decorators.cache import cache_control
from django.views.decorators.gzip import gzip_page
import newrelic.agent
import waffle
from mkt.commonplace.models import DeployBuildId
from mkt.regions.middleware import RegionMiddleware
from mkt.account.helpers import fxa_auth_info
from mkt.webapps.models import Webapp
@gzip_page
@cache_control(max_age=settings.CACHE_MIDDLEWARE_SECONDS)
def commonplace(request, repo, **kwargs):
"""Serves the frontend single-page apps."""
if repo not in settings.FRONTEND_REPOS:
raise Http404
BUILD_ID = get_build_id(repo)
ua = request.META.get('HTTP_USER_AGENT', '').lower()
include_splash = False
detect_region_with_geoip = False
if repo == 'fireplace':
include_splash = True
has_sim_info_in_query = (
'mccs' in request.GET or
('mcc' in request.GET and 'mnc' in request.GET))
if not has_sim_info_in_query:
# If we didn't receive mcc/mnc, then use geoip to detect region,
# enabling fireplace to avoid the consumer_info API call that it
# does normally to fetch the region.
detect_region_with_geoip = True
fxa_auth_state, fxa_auth_url = fxa_auth_info()
site_settings = {
'dev_pay_providers': settings.DEV_PAY_PROVIDERS,
'fxa_auth_state': fxa_auth_state,
'fxa_auth_url': fxa_auth_url,
}
ctx = {
'BUILD_ID': BUILD_ID,
'LANG': request.LANG,
'DIR': lang_dir(request.LANG),
'include_splash': include_splash,
'repo': repo,
'robots': 'googlebot' in ua,
'site_settings': site_settings,
'newrelic_header': newrelic.agent.get_browser_timing_header,
'newrelic_footer': newrelic.agent.get_browser_timing_footer,
}
if repo == 'fireplace':
# For OpenGraph stuff.
resolved_url = resolve(request.path)
if resolved_url.url_name == 'detail':
ctx = add_app_ctx(ctx, resolved_url.kwargs['app_slug'])
ctx['waffle_switches'] = list(
waffle.models.Switch.objects.filter(active=True)
.values_list('name', flat=True))
media_url = urlparse(settings.MEDIA_URL)
if media_url.netloc:
ctx['media_origin'] = media_url.scheme + '://' + media_url.netloc
if detect_region_with_geoip:
region_middleware = RegionMiddleware()
ctx['geoip_region'] = region_middleware.region_from_request(request)
if repo in settings.REACT_REPOS:
return render(request, 'commonplace/index_react.html', ctx)
elif repo in settings.COMMONPLACE_REPOS:
return render(request, 'commonplace/index.html', ctx)
def get_allowed_origins(request, include_loop=True):
current_domain = settings.DOMAIN
current_origin = '%s://%s' % ('https' if request.is_secure() else 'http',
current_domain)
development_server = (settings.DEBUG or
current_domain == 'marketplace-dev.allizom.org')
allowed = [
# Start by allowing the 2 app:// variants for the current domain,
# and then add the current http or https origin.
'app://packaged.%s' % current_domain,
'app://%s' % current_domain,
current_origin,
# Also include Tarako
'app://tarako.%s' % current_domain,
]
# On dev, also allow localhost/mp.dev.
if development_server:
allowed.extend([
            'http://localhost:8675',
'https://localhost:8675',
'http://localhost',
'https://localhost',
'http://mp.dev',
'https://mp.dev',
])
if include_loop:
# Include loop origins if necessary.
allowed.extend([
'https://hello.firefox.com',
'https://call.firefox.com',
])
# On dev, include loop dev & stage origin as well.
if development_server:
allowed.extend([
'https://loop-webapp-dev.stage.mozaws.net',
'https://call.stage.mozaws.net',
])
return json.dumps(allowed)
def get_build_id(repo):
try:
# Get the build ID from the database (bug 1083185).
return DeployBuildId.objects.get(repo=repo).build_id
except DeployBuildId.DoesNotExist:
# If we haven't initialized a build ID yet, read it directly from the
        # build_id.txt written by our frontend builds.
try:
build_id_path = os.path.join(settings.MEDIA_ROOT, repo,
'build_id.txt')
with storage.open(build_id_path) as f:
return f.read()
except:
return 'dev'
def fxa_authorize(request):
"""
A page to mimic commonplace's fxa-authorize page to handle login.
"""
return render(request, 'commonplace/fxa_authorize.html')
def add_app_ctx(ctx, app_slug):
"""
If we are hitting the Fireplace detail page, get the app for Open Graph
tags.
"""
try:
app = Webapp.objects.get(app_slug=app_slug)
ctx['app'] = app
except Webapp.DoesNotExist:
pass
return ctx
@gzip_page
def iframe_install(request):
return render(request, 'commonplace/iframe-install.html', {
'allowed_origins': get_allowed_origins(request)
})
@gzip_page
def potatolytics(request):
return render(request, 'commonplace/potatolytics.html', {
'allowed_origins': get_allowed_origins(request,
include_loop=False)
})
def lang_dir(lang):
if lang == 'rtl' or translation.get_language_bidi():
return 'rtl'
else:
return 'ltr'
|
sebastianffx/active_deep_segmentation
|
flickrsearch.py
|
Python
|
gpl-2.0
| 2,457
| 0.014652
|
""" Copyright (C) 2015 Sebastian Otalora
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
import flickrapi
import json
import matplotlib.pyplot as plt
import Image
import urllib, cStringIO
import numpy as np
api_key = u'3724c5fcd02af2a224329c086d64a68c' #TODO: Put this into a file listed in .gitignore
api_secret = u'6e80f744db6eb5c3'
flickr = flickrapi.FlickrAPI(api_key, api_secret,format='parsed-json')
#Obtain medium-sized (maxdim = 500px) image URLs hosted on Flickr for the provided label
#https://www.flickr.com/services/api/misc.urls.html
def get_catimgs_urls(label='chair', show_samples=0):
#The search query is stored in a JSON object
imgs_urls = []
photos = flickr.photos_search(tags=label,sort='relevance', geo_context=1, extras='url_m')
    #photos['photos']['photo'] is a list with all the results imgs as dicts with ids, captions, urls and so forth
for img_id in range(len(photos['photos']['photo'])):
cur_img_id = photos['photos']['photo'][img_id]
imgs_urls.append(cur_img_id['url_m']) #medium size
#photoSizes = flickr.photos_getSizes(photo_id=cur_img_id['id'])
        #cur_img_id['sizes']['size'][0] contains the different sources of the img in different sizes:
#{u'height': 75,
# u'label': u'Square',
# u'media': u'photo',
# u'source': u'https://farm6.staticflickr.com/5836/22322409944_1498c04fb6_s.jpg',
# u'url': u'https://www.flickr.com/photos/g20_turkey/22322409944/sizes/sq/',
# u'width': 75}
#photos = flickr.photos_search(tags='chair', lat='42.355056', lon='-71.065503', radius='5')
#sets = flickr.photosets.getList(user_id='73509078@N00')
#lets parse the source URL
if show_samples:
img_idt = 0
URL = imgs_urls[img_idt]
img_file = cStringIO.StringIO(urllib.urlopen(URL).read())
img = Image.open(img_file)
#lets look at the image with plt
plt.imshow(img)
plt.show()
return imgs_urls
#lets save all the image urls to process with the opencv gui
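# Example call (hypothetical label; needs valid Flickr API credentials):
#   urls = get_catimgs_urls(label='bicycle', show_samples=0)
#   print len(urls)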
|
addition-it-solutions/project-all
|
addons/account_budget/__openerp__.py
|
Python
|
agpl-3.0
| 3,013
| 0.003319
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Budgets Management',
'version': '1.0',
'category': 'Accounting & Finance',
'description': """
This module allows accountants to manage analytic and crossovered budgets.
==========================================================================
Once the Budgets are defined (in Invoicing/Budgets/Budgets), the Project Managers
can set the planned amount on each Analytic Account.
The accountant has the possibility to see the total of amount planned for each
Budget in order to ensure the total planned is not greater/lower than what he
planned for this Budget. Each list of record can also be switched to a graphical
view of it.
Three reports are available:
----------------------------
1. The first is available from a list of Budgets. It gives the spreading, for
these Budgets, of the Analytic Accounts.
2. The second is a summary of the previous one, it only gives the spreading,
for the selected Budgets, of the Analytic Accounts.
3. The last one is available from the Analytic Chart of Accounts. It gives
the spreading, for the selected Analytic Accounts of Budgets.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/accounting',
'depends': ['account'],
'data': [
'security/ir.model.access.csv',
'security/account_budget_security.xml',
'account_budget_view.xml',
'account_budget_report.xml',
'account_budget_workflow.xml',
'wizard/account_budget_analytic_view.xml',
'wizard/account_budget_report_view.xml',
'wizard/account_budget_crossovered_summary_report_view.xml',
'wizard/account_budget_crossovered_report_view.xml',
'views/report_analyticaccountbudget.xml',
'views/report_budget.xml',
'views/report_crossoveredbudget.xml',
],
'demo': ['account_budget_demo.xml'],
'test': [
'test/account_budget.yml',
'test/account_budget_report.yml',
],
'installable': True,
'auto_install': False,
}
|
tkwon/dj-stripe
|
djstripe/migrations/0021_auto_20170107_0813.py
|
Python
|
mit
| 865
| 0.001156
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-07 08:13
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations
import djstripe.fields
class Migration(migrations.Migration):
dependencies = [
('djstripe', '0020_auto_20161229_0041'),
]
operations = [
migrations.AlterField(
model_name='subscription',
            name='application_fee_percent',
field=djstripe.fields.StripePercentField(decimal_places=2, help_text="A positive decimal that represents the fee percentage of the subscription invoice amount that will be transferred to the application owner's Stripe account each billing period.", max_digits=5, null=True, validators=[django.core.validators.MinValueValidator(1.0), django.core.validators.MaxValueValidator(100.0)]),
),
]
|
aayoubi/HNTwitter-bot
|
hnbot/test/test_hnbot.py
|
Python
|
gpl-3.0
| 306
| 0.022876
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import sys
import os
import hnbot
class HnbotMessage(unittest.TestCase):
    def testTooLarge(self):
"""test should fail if message size is bigger than 140 characters"""
self.assertEqual(1,1)
if __name__ == "__main__":
unittest.main()
|
anilpai/leetcode
|
Codewars/BiggestSum.py
|
Python
|
mit
| 353
| 0.002833
|
'''
Biggest Sum from Top left to Bottom Right
'''
def find_sum(m):
p = [0] * (len(m) + 1)
for l in m:
for i, v in enumerate(l, 1):
p[i] = v + max(p[i-1], p[i])
return p[-1]
matrix = [[20, 20, 10, 10],
[10, 20, 10, 10],
          [10, 20, 20, 20],
[10, 10, 10, 20]]
print(find_sum(matrix) == 140)
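# Added check (a sketch, not in the original): p is a rolling DP row where
# p[i] holds the best top-left-to-current-cell sum, so a 1x1 matrix
# degenerates to its single cell.
assert find_sum([[7]]) == 7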
|
lrvick/kral
|
kral/services/twitter.py
|
Python
|
agpl-3.0
| 2,819
| 0.009933
|
# -*- coding: utf-8 -*-
import base64
import time
import simplejson as json
from eventlet.green import urllib2
import urllib
from kral import config
def stream(queries, queue, kral_start_time):
url = 'https://stream.twitter.com/1/statuses/filter.json'
    queries = [q.lower() for q in queries]
    quoted_queries = [urllib.quote(q) for q in queries]
query_post = 'track=' + ",".join(quoted_queries)
request = urllib2.Request(url, query_post)
auth = base64.b64encode('%s:%s' % (config.TWITTER['user'], config.TWITTER['password']))
request.add_header('Authorization', "basic %s" % auth)
request.add_header('User-agent', config.USER_AGENT)
for item in urllib2.urlopen(request):
try:
item = json.loads(item)
        except json.JSONDecodeError: # for whatever reason, reading Twitter's JSON sometimes raises this
continue
if 'text' in item and 'user' in item:
#determine what query we're on if it exists in the text
text = item['text'].lower()
query = ''
for q in queries:
q_uni = unicode(q, 'utf-8')
if q_uni in text:
query = q_uni
lang = False
if config.LANG:
if item['user']['lang'] == config.LANG:
lang = True
else:
lang = True
if query and lang:
post = {
'service' : 'twitter',
'user' : {
'id' : item['user']['id_str'],
'utc' : item['user']['utc_offset'],
'name' : item['user']['screen_name'],
'description' : item['user']['description'],
'location' : item['user']['location'],
'avatar' : item['user']['profile_image_url'],
'subscribers': item['user']['followers_count'],
'subscriptions': item['user']['friends_count'],
'website': item['user']['url'],
'language' : item['user']['lang'],
},
'links' : [],
'id' : item['id'],
'application': item['source'],
'date' : int(time.mktime(time.strptime(item['created_at'],'%a %b %d %H:%M:%S +0000 %Y'))),
'text' : text,
'query' : query,
'geo' : item['coordinates'],
}
for url in item['entities']['urls']:
post['links'].append({ 'href' : url.get('url') })
queue.put(post)
|
simonpessemesse/seguinus
|
chambres_chercheErreurDate.py
|
Python
|
gpl-2.0
| 361
| 0.01108
|
# coding: utf-8
import configureEnvironnement
configureEnvironnement.setup()
import django
django.setup()
from datetime import datetime, date, timedelta
from chambres.models import Reservation, Client
from chambres.views import OneDayStats
rs = Reservation.objects.all()
for r in rs:
    if r.dateArrivee > r.dateDepart:
        print(r, r.id, r.client.id)
|
opendatateam/udata
|
udata/commands/images.py
|
Python
|
agpl-3.0
| 2,037
| 0
|
import logging
from collections import Counter
from udata.commands import cli, header, success
log = logging.getLogger(__name__)
@cli.group('images')
def grp():
'''Images related operations'''
pass
def render_or_skip(obj, attr):
try:
getattr(obj, attr).rerender()
obj.save()
return 1
except Exception as e:
log.warning('Skipped "%s": %s(%s)', obj, e.__class__.__name__, e)
return 0
@grp.command()
def render():
'''Force (re)rendering stored images'''
from udata.core.organization.models import Organization
from udata.core.post.models import Post
from udata.core.reuse.models import Reuse
from udata.core.user.models import User
header('Rendering images')
count = Counter()
total = Counter()
organizations = Organization.objects(logo__exists=True)
total['orgs'] = organizations.count()
log.info('Processing {0} organizations logos'.format(total['orgs']))
for org in organizations:
count['orgs'] += render_or_skip(org, 'logo')
users = User.objects(avatar__exists=True)
total['users'] = users.count()
log.info('Processing {0} user avatars'.format(total['users']))
    for user in users:
count['users'] += render_or_skip(user, 'avatar')
posts = Post.objects(image__exists=True)
total['posts'] = posts.count()
    log.info('Processing {0} post images'.format(total['posts']))
for post in posts:
count['posts'] += render_or_skip(post, 'image')
reuses = Reuse.objects(image__exists=True)
total['reuses'] = reuses.count()
log.info('Processing {0} reuse images'.format(total['reuses']))
for reuse in reuses:
count['reuses'] += render_or_skip(reuse, 'image')
log.info('''Summary:
Organization logos: {count[orgs]}/{total[orgs]}
User avatars: {count[users]}/{total[users]}
Post images: {count[posts]}/{total[posts]}
Reuse images: {count[reuses]}/{total[reuses]}
'''.format(count=count, total=total))
success('Images rendered')
|
daily-bruin/meow
|
meow/scheduler/migrations/0010_auto_20190716_1418.py
|
Python
|
agpl-3.0
| 382
| 0
|
# Generated by Django 2.0.4 on 2019-07-16 21:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
        ('scheduler', '0009_auto_20190607_1518'),
]
operations = [
migrations.RenameField(
model_name='smpost',
old_name='post_instagram',
new_name='post_newsletter',
),
]
|
lowRISC/opentitan
|
util/design/keccak_rc.py
|
Python
|
apache-2.0
| 1,923
| 0.00312
|
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""Calculate Round Constant
"""
import argparse
import bitarray as ba
import logging as log
def main():
parser = argparse.ArgumentParser(
prog="keccak round constant generator",
description=
'''This tool generates the round constants based on the given max round number'''
)
parser.add_argument(
'-r',
type=int,
default=24,
        help='''Max Round value. Default is SHA3 Keccak round %(default)s''')
parser.add_argument('--verbose', '-v', action='store_true', help='Verbose')
args = parser.parse_args()
if (args.verbose):
log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
else:
log.basicConfig(format="%(levelname)s: %(message)s")
if args.r < 1:
log.error("Max Round value shoul
|
d be greater than 0")
# Create 0..255 bit array
rc = ba.bitarray(256)
rc.setall(0)
r = ba.bitarray('10000000')
rc[0] = True # t%255 == 0 -> 1
for i in range(1, 256):
# Update from t=1 to t=255
r_d = ba.bitarray('0') + r
if r_d[8]:
#Flip 0,4,5,6
r = r_d[0:8] ^ ba.bitarray('10001110')
else:
r = r_d[0:8]
rc[i] = r[0]
## Print rc
print(rc)
## Round
rcs = [] # Each entry represent the round
for rnd in range(0, args.r):
# Let RC=0
rndconst = ba.bitarray(64)
rndconst.setall(0)
# for j [0 .. L] RC[2**j-1] = rc(j+7*rnd)
for j in range(0, 7): #0 to 6
rndconst[2**j - 1] = rc[(j + 7 * rnd) % 255]
print("64'h{}, // Round {}".format(rndhex(rndconst), rnd))
def rndhex(bit) -> str:
return bit[::-1].tobytes().hex()
if __name__ == "__main__":
main()
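# Sanity note (based on FIPS 202, an assumption rather than this script's
# output): for rnd = 0 only bit 0 of the round constant is set, so the first
# printed line should be "64'h0000000000000001, // Round 0".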
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/ship/components/armor/shared_arm_mandal_enhanced_heavy_composite.py
|
Python
|
mit
| 514
| 0.042802
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/armor/shared_arm_mandal_enhanced_heavy_composite.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","arm_mandal_enhanced_heavy_composite_n")
	#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
avaitla/Haskell-to-C---Bridge
|
pygccxml-1.0.0/pygccxml/parser/directory_cache.py
|
Python
|
bsd-3-clause
| 18,986
| 0.003476
|
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#
# The initial version of the directory_cache_t class was written
# by Matthias Baas (baas@ira.uka.de).
"""Directory cache implementation.
This module contains the implementation of a cache that uses individual
files stored in a dedicated cache directory to store the cached contents.
The cache class is L{directory_cache_t} which can be passed to the C{cache}
argument of the L{parse()} function.
"""
import os, os.path, gzip, md5
import cPickle
import declarations_cache
class index_entry_t:
"""Entry of the index table in the directory cache index.
Each cached header file (i.e. each *.cache file) has a corresponding
index_entry_t object. This object is used to determine whether the
cache file with the declarations is still valid or not.
This class is a helper class for the directory_cache_t class.
"""
def __init__( self, filesigs, configsig ):
"""Constructor.
filesigs is a list of tuples (fileid, sig)...
configsig is the signature of the configuration object.
"""
self.filesigs = filesigs
self.configsig = configsig
def __getstate__(self):
return (self.filesigs, self.configsig)
def __setstate__(self, state):
self.filesigs, self.configsig = state
class directory_cache_t ( declarations_cache.cache_base_t ):
"""Cache class that stores its data as multiple files inside a directory.
The cache stores one index file called "index.dat" which is always
read by the cache when the cache object is created. Each header file
will have its corresponding *.cache file that stores the declarations
found in the header file. The index file is used to determine whether
a *.cache file is still valid or not (by checking if one of the dependent
files (i.e. the header file itself and all included files) have been
modified since the last run).
"""
def __init__( self, dir="cache", compression=False, md5_sigs=True ):
"""Constructor.
dir is the cache directory (it is created if it does not exist).
If compression is set to True the cache files will be compressed
using gzip.
        md5_sigs determines whether file modification is checked by
computing a md5 digest or by checking the modification date.
"""
declarations_cache.cache_base_t.__init__(self)
# Cache directory
self.__dir = os.path.abspath(dir)
# Flag that determines whether the cache files will be compressed
self.__compression = compression
# Flag that determines whether the signature is a md5 digest or
# the modification time
# (this flag is passed to the filename_repository_t class)
self.__md5_sigs = md5_sigs
# Filename repository
self.__filename_rep = filename_repository_t(self.__md5_sigs)
# Index dictionary (Key is the value returned by _create_cache_key()
# (which is based on the header file name) and value is an
# index_entry_t object)
self.__index = {}
# Flag that indicates whether the index was modified
        self.__modified_flag = False
# Check if dir refers to an existing file...
if os.path.isfile(self.__dir):
raise ValueError, "Cannot use %s as cache directory. There is already a file with that name."%self.__dir
# Load the cache or create the cache directory...
if os.path.isdir(self.__dir):
self._load()
else:
# Create the cache directory...
os.mkdir(self.__dir)
def flush(self):
"""Save the index table to disk."""
self._save()
# self.__filename_rep._dump()
def update(self, source_file, configuration, declarations, included_files):
"""Replace a cache entry by a new value.
@param source_file: Header file name.
@type source_file: str
@param configuration: Configuration object.
@type configuration: L{config_t}
@param declarations: Declarations contained in the header file.
@type declarations: picklable object
@param included_files: Dependent files
@type included_files: list of str
"""
        # Normalize all paths...
source_file = os.path.normpath(source_file)
included_files = map(lambda p: os.path.normpath(p), included_files)
# Create the list of dependent files. This is the included_files list
# + the source file. Duplicate names are removed.
dependent_files = {}
for name in [source_file]+included_files:
dependent_files[name] = 1
dependent_files = dependent_files.keys()
key = self._create_cache_key(source_file)
# Remove an existing entry (if there is one)
# After calling this method, it is guaranteed that __index[key]
# does not exist anymore.
self._remove_entry(source_file, key)
# Create a new entry...
# Create the sigs of all dependent files...
filesigs = []
for filename in dependent_files:
id_,sig = self.__filename_rep.acquire_filename(filename)
filesigs.append((id_,sig))
configsig = self._create_config_signature(configuration)
entry = index_entry_t(filesigs, configsig)
self.__index[key] = entry
self.__modified_flag = True
# Write the declarations into the cache file...
cachefilename = self._create_cache_filename(source_file)
self._write_file(cachefilename, declarations)
def cached_value(self, source_file, configuration):
"""Return the cached declarations or None.
@param source_file: Header file name
@type source_file: str
@param configuration: Configuration object
@type configuration: L{config_t}
@return: Cached declarations or None
"""
# Check if the cache contains an entry for source_file
key = self._create_cache_key(source_file)
entry = self.__index.get(key)
        if entry is None:
# print "CACHE: %s: Not cached"%source_file
return None
# Check if the entry is still valid. It is not valid if:
# - the source_file has been updated
# - the configuration object has changed (i.e. the header is parsed
# by gccxml with different settings which may influence the
# declarations)
# - the included files have been updated
# (this list is part of the cache entry as it cannot be known
# by the caller when cached_value() is called. It was instead
# passed to update())
# Check if the config is different...
configsig = self._create_config_signature(configuration)
if configsig!=entry.configsig:
# print "CACHE: %s: Config mismatch"%source_file
return None
# Check if any of the dependent files has been modified...
for id_, sig in entry.filesigs:
if self.__filename_rep.is_file_modified(id_, sig):
# print "CACHE: %s: Entry not up to date"%source_file
return None
# Load and return the cached declarations
cachefilename = self._create_cache_filename(source_file)
decls = self._read_file(cachefilename)
# print "CACHE: Using cached decls for",source_file
return decls
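    # --- Editor's note: a minimal usage sketch of the read/check cycle above
    # (not part of the original source). "foo.h", `config` and `parse_header`
    # are hypothetical stand-ins for a real header file, a config_t object
    # and a parser call.
    #
    #   cache = directory_cache_t(dir="decl_cache", compression=True)
    #   decls = cache.cached_value("foo.h", config)
    #   if decls is None:                              # missing or stale entry
    #       decls = parse_header("foo.h", config)
    #       cache.update("foo.h", config, decls, included_files=["bar.h"])
    #   cache.flush()                                  # persist index.dat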
def _load(self):
"""Load the cache.
Loads the file index.dat which contains the index table and
the file name repository.
This method is called by the constructor.
"""
indexfilename = os.path.join(self.__dir, "index.dat")
if os.path.exists(indexfilename):
|
PandaWei/avocado
| avocado/utils/software_manager.py
| Python
| gpl-2.0
| 32,607
| 0.000123
|
#!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
# Author: Higor Vieira Alves <halves@br.ibm.com>
# Author: Ramon de Carvalho Valle <rcvalle@br.ibm.com>
#
# This code was adapted from the autotest project,
# client/shared/software_manager.py
"""
Software package management library.
This is an abstraction layer on top of the existing distributions high level
package managers. It supports package operations useful for testing purposes,
and multiple high level package managers (here called backends). If you want
to make this lib to support your particular package manager/distro, please
implement the given backend class.
:author: Higor Vieira Alves <halves@br.ibm.com>
:author: Lucas Meneghel Rodrigues <lmr@redhat.com>
:author: Ramon de Carvalho Valle <rcvalle@br.ibm.com>
:copyright: IBM 2008-2009
:copyright: Red Hat 2009-2014
"""
import os
import re
import logging
import optparse
import tempfile
try:
import yum
except ImportError:
HAS_YUM_MODULE = False
else:
HAS_YUM_MODULE = True
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
from . import process
from . import data_factory
from . import distro
from . import path as utils_path
log = logging.getLogger('avocado.test')
SUPPORTED_PACKAGE_MANAGERS = ['apt-get', 'yum', 'zypper', 'dnf']
class SystemInspector(object):
"""
System inspector class.
    This may grow up to include more complete reports of operating system
    and machine properties.
"""
def __init__(self):
"""
Probe system, and save information for future reference.
"""
self.distro = distro.detect().name
def get_package_management(self):
"""
Determine the supported package management systems present on the
        system. If more than one package management system is installed,
        try to find the best supported one.
"""
list_supported = []
for high_level_pm in SUPPORTED_PACKAGE_MANAGERS:
try:
utils_path.find_command(high_level_pm)
list_supported.append(high_level_pm)
except utils_path.CmdNotFoundError:
pass
pm_supported = None
if len(list_supported) == 0:
pm_supported = None
if len(list_supported) == 1:
pm_supported = list_supported[0]
elif len(list_supported) > 1:
if ('apt-get' in list_supported and
self.distro in ('debian', 'ubuntu')):
pm_supported = 'apt-get'
elif ('dnf' in list_supported and
self.distro in ('redhat', 'fedora')):
pm_supported = 'dnf'
elif ('yum' in list_supported and
self.distro in ('redhat', 'fedora')):
pm_supported = 'yum'
else:
pm_supported = list_supported[0]
return pm_supported
class SoftwareManager(object):
"""
Package management abstraction layer.
It supports a set of common package operations for testing purposes, and it
uses the concept of a backend, a helper class that implements the set of
operations of a given package management tool.
"""
def __init__(self):
"""
Lazily instantiate the object
"""
self.initialized = False
self.backend = None
self.lowlevel_base_command = None
self.base_command = None
self.pm_version = None
def _init_on_demand(self):
"""
        Determines the best supported package management system for the
        running operating system and initializes the appropriate backend.
"""
if not self.initialized:
inspector = SystemInspector()
backend_type = inspector.get_package_management()
backend_mapping = {'apt-get': AptBackend,
'yum': YumBackend,
'dnf': DnfBackend,
'zypper': ZypperBackend}
if backend_type not in backend_mapping.keys():
raise NotImplementedError('Unimplemented package management '
'system: %s.' % backend_type)
backend = backend_mapping[backend_type]
self.backend = backend()
self.initialized = True
def __getattr__(self, name):
self._init_on_demand()
return self.backend.__getattribute__(name)
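# --- Editor's note: a short sketch of the lazy delegation above (not part of
# the original source; 'gcc' is just an example package name).
#
#   sm = SoftwareManager()              # no backend probing happens yet
#   if not sm.check_installed('gcc'):   # __getattr__ -> _init_on_demand()
#       sm.install('gcc')               # delegated to the detected backend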
class BaseBackend(object):
"""
This class implements all common methods among backends.
"""
def install_what_provides(self, path):
"""
Installs package that provides [path].
:param path: Path to file.
"""
provides = self.provides(path)
if provides is not None:
return self.install(provides)
else:
log.warning('No package seems to provide %s', path)
return False
class RpmBackend(BaseBackend):
"""
This class implements operations executed with the rpm package manager.
rpm is a lower level package manager, used by higher level managers such
as yum and zypper.
"""
PACKAGE_TYPE = 'rpm'
SOFTWARE_COMPONENT_QRY = (
PACKAGE_TYPE + ' ' +
'%{NAME} %{VERSION} %{RELEASE} %{SIGMD5} %{ARCH}')
def __init__(self):
self.lowlevel_base_cmd = utils_path.find_command('rpm')
def _check_installed_version(self, name, version):
"""
Helper for the check_installed public method.
:param name: Package name.
:param version: Package version.
"""
cmd = (self.lowlevel_base_cmd + ' -q --qf %{VERSION} ' + name)
inst_version = process.system_output(cmd, ignore_status=True)
if 'not installed' in inst_version:
return False
return bool(inst_version >= version)
def check_installed(self, name, version=None, arch=None):
"""
Check if package [name] is installed.
:param name: Package name.
:param version: Package version.
:param arch: Package architecture.
"""
if arch:
cmd = (self.lowlevel_base_cmd + ' -q --qf %{ARCH} ' + name)
inst_archs = process.system_output(cmd, ignore_status=True)
inst_archs = inst_archs.split('\n')
for inst_arch in inst_archs:
if inst_arch == arch:
return self._check_installed_version(name, version)
return False
elif version:
return self._check_installed_version(name, version)
else:
cmd = 'rpm -q ' + name
try:
process.system(cmd)
return True
except process.CmdError:
return False
def list_all(self, software_components=True):
"""
List all installed packages.
:param software_components: log in a format suitable for the
SoftwareComponent schema
"""
log.debug("Listing all system packages (may take a while)")
if software_components:
cmd_format = "rpm -qa --qf '%s' | sort"
query_format = "%s\n" % self.SOFTWARE_COMPONENT_QRY
cmd_format %= query_format
cmd_result = process.run(cmd_format, verbose=False, shell=True)
else:
cmd_result = process.run('rpm -qa | sort', verbose=False,
shell=True)
out = cmd_result.stdout.strip()
installed_packages = out.splitlines()
return installed_packages
de
|
stanleyz/pfsense-2.x-tools
| pfsense-updateCRL.py
| Python
| gpl-2.0
| 2,863
| 0.008732
|
#!/usr/bin/env python
import sys
from pfsense_api import PfSenseAPI
from datetime import datetime
from pfsense_cmdline import PfSenseOptionParser
from ConfigParser import ConfigParser
from pfsense_logger import PfSenseLogger as logging
import os.path
parser = PfSenseOptionParser()
parser.add_option("--id", dest="crl_id", help="ID of the CRL to update")
parser.add_option("--name", dest="name", help="Descriptive name of the CRL", default="Imported CRL")
parser.add_option("--crl", dest="crl", help="File containing CRL in PEM format", metavar="CRL_FILE")
parser.add_option("--ssl_verification", dest="ssl_verification", help="Whether SSL should be verified or not, valid values are yes/no, true/false, 1/0", default=True, metavar="yes/no")
parser.add_option("--overwrite", dest="overwrite", default=False, help="Command line options will overwrite same settings in config file", action="store_true")
(options, args) = parser.parse_args()
logger = logging.setupLogger(options.logging)
parser.check_cmd_options( options )
required_items = ['crl_id', 'crl', 'host', 'username', 'password']
options_cmdline = vars(options).copy()
del options_cmdline['config']
del options_cmdline['overwrite']
configFile = ConfigParser()
configFile.read(options.config)
api = PfSenseAPI()
for section in configFile.sections():
logger.info("Working on %s" % section)
parsed_options = parser.parse_individual_options(configFile.items(section), options_cmdline, overwrite = options.overwrite, bool_keys = ['ssl_verification'])
required_items_missed = False
missed_items = parser.check_required_options(parsed_options, required_items)
for item in missed_items:
        logger.error('%s is required for entry %s' % (item, section))
required_items_missed = True
if required_items_missed:
continue
if not os.path.isfile(parsed_options['crl']):
logger.error('CRL file %s does not exist?' % parsed_options['crl'])
continue
try:
crlFile = open(parsed_options['crl'], 'r')
crlData = crlFile.read()
crlFile.close()
except:
logger.error("Error while read CRL data from file %s" % parsed_options['crl'])
continue
api['options'] = parsed_options
api.login()
(rc, data, contentType) = api.call( '/system_crlmanager.php', 'POST',
apiData = {
'method': 'existing',
'descr': '%s (last refresh: %s)' % (options.name, datetime.now().isoformat()),
'crltext': crlData,
'submit': 'Save'
},
        itemData = {
'id': parsed_options['crl_id'],
'act': 'editimported'
})
api.logout()
if rc == 302:
logger.info('CRL Update successful for %s' % (section))
else:
        logger.info('CRL Update failed for %s' % (section))
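# --- Editor's note: an illustrative invocation (not part of the original
# source). --host/--username/--password are assumed to be defined by
# PfSenseOptionParser; all values are placeholders.
#
#   $ ./pfsense-updateCRL.py --config pfsense.ini --id 5 --crl current.pem \
#         --ssl_verification no --overwrite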
|
chuckwired/ber-kit
| setup.py
| Python
| gpl-3.0
| 1,036
| 0.007722
|
import multiprocessing
from setuptools import setup, find_packages
from ber_kit import __version__
setup(name='ber-kit',
version= __version__,
description='Toolkit to manage rolling upgrades on a Marathon cluster',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Topic :: System :: Systems Administration',
],
keywords='marathon',
url='http://bitbucket.org/connectedsolutions/ber-kit',
author='Charles Rice, Cake Solutions',
author_email='devops@cakesolutions.net',
license='GNU GPLv3',
packages=find_packages(),
include_package_data=True,
install_requires = [
        'marathon>=0.8.6',
],
entry_points = {
'console_scripts': [
'ber-kit=ber_kit.main:main',
],
},
test_suite='nose.collector',
tests_require=[
'nose',
'mock',
'coverage',
],
zip_safe=False)
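# --- Editor's note: a hedged usage sketch (not part of the original source).
# After installation, setuptools generates the console script declared in
# entry_points above.
#
#   $ pip install -e .
#   $ ber-kit               # dispatches to ber_kit.main:main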
|
bitsanity/rateboard
| moneroticker.py
| Python
| apache-2.0
| 2,777
| 0.022686
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys, traceback
import threading
import time
import simplejson as json
import urllib2
from PyQt4 import QtGui,QtCore
from boardlet import Boardlet
from modellet import Modellet
class MoneroTicker(Boardlet):
def __init__(self, parent, btcusd):
super(MoneroTicker, self).__init__(parent)
self.p_model = Monero( btcusd )
self.initUI()
def initUI(self):
super(MoneroTicker, self).initUI()
self.p_icon = QtGui.QLabel(self)
self.p_icon.setGeometry( self.b_imgx(), self.b_imgy(),
self.b_iconwidth(),self.b_iconheight() )
self.p_icon.setPixmap( QtGui.QPixmap(os.getcwd() + "/img/xmr.png" ) )
t = threading.Thread(target=self.periodicUpdate)
t.setDaemon(True)
t.start()
def paintEvent(self, e):
super(MoneroTicker, self).paintEvent(e)
qp = QtGui.QPainter()
qp.begin(self)
qp.setPen( self.p_grayPen )
qp.setFont( self.p_pairFont )
qp.drawText( self.b_col1x(), self.b_row1y(), 'Bittrex XMRUSD' )
qp.setPen( self.p_whitePen )
qp.setFont( self.p_normFont )
qp.drawText( self.b_col1x(), self.b_row2y() - 5,
'bid: ' + "{:06.2f}".format(self.p_model.getBestBid()) )
        qp.drawText( self.b_col1x(), self.b_row3y() - 5,
            'ask: ' + "{:06.2f}".format(self.p_model.getBestAsk()) )
qp.setFont( self.p_timeFont )
        qp.setPen( self.p_grayPen )
qp.drawText( self.b_imgx(), self.b_row4y(),
'Refreshed: ' + self.p_model.getLastUpdated() )
qp.end()
def periodicUpdate(self):
while(True):
st = self.getNextWaitTimeSeconds()
time.sleep( st )
self.p_model.doRefresh()
class Monero(Modellet):
def __init__(self, btcusd):
self.p_btcusd = btcusd
self.p_refreshTime = None
self.p_bestBid = '000.00'
self.p_bestAsk = '000.00'
def getBestBid(self):
return float(self.p_bestBid) * float(self.p_btcusd.p_model.getBid())
def getBestAsk(self):
return float(self.p_bestAsk) * float(self.p_btcusd.p_model.getAsk())
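    # --- Editor's note: a worked example of the cross-rate arithmetic above
    # (not part of the original source; the numbers are illustrative only).
    # With a Bittrex best bid of 0.0065 BTC per XMR and a BTCUSD bid of
    # 40000 USD per BTC:
    #
    #   getBestBid() == 0.0065 * 40000 == 260.0    # XMR price in USD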
def doRefresh(self):
headers = {'User-agent' : 'Mozilla/5.0'}
req = urllib2.Request( 'https://bittrex.com/api/v1.1/public/getticker?market=BTC-XMR', None, headers )
try:
resp = urllib2.urlopen(req).read()
self.p_bestBid = str( json.loads(resp)['result']['Bid'] )
self.p_bestAsk = str( json.loads(resp)['result']['Ask'] )
super(Monero, self).setFaultFlag(False)
super(Monero, self).setLastUpdatedNow()
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print ''.join('!! ' + line for line in lines)
super(Monero, self).setFaultFlag(True)
|
met-office-ocean/obsoper
| benchmarks/bench_walk.py
| Python
| bsd-3-clause
| 551
| 0
|
"""Benchmark Walk algorithm"""
import numpy as np
import bench
import obsoper.walk
class BenchmarkWalk(bench.Suite):
def setUp(self):
longitudes, latitudes = np.meshgrid([1, 2, 3],
[1, 2, 3],
indexing="ij")
self.fixture = obsoper.walk.Walk.from_lonlats(longitudes,
latitudes)
def bench_detect(self):
        for _ in range(10):
            self.fixture.detect((2.9, 2.9), i=0, j=0)
|
schleichdi2/OpenNfr_E2_Gui-6.0
| lib/python/Components/SystemInfo.py
| Python
| gpl-2.0
| 3,888
| 0.010031
|
from boxbranding import getBoxType, getMachineProcModel, getMachineBuild
from os import path
from enigma import eDVBResourceManager, Misc_Options
from Tools.Directories import fileExists, fileCheck
from Tools.HardwareInfo import HardwareInfo
SystemInfo = { }
#FIXMEE...
def getNumVideoDecoders():
idx = 0
while fileExists("/dev/dvb/adapter0/video%d"% idx, 'f'):
idx += 1
return idx
SystemInfo["NumVideoDecoders"] = getNumVideoDecoders()
SystemInfo["PIPAvailable"] = SystemInfo["NumVideoDecoders"] > 1
SystemInfo["CanMeasureFrontendInputPower"] = eDVBResourceManager.getInstance().canMeasureFrontendInputPower()
def countFrontpanelLEDs():
leds = 0
if fileExists("/proc/stb/fp/led_set_pattern"):
leds += 1
while fileExists("/proc/stb/fp/led%d_pattern" % leds):
leds += 1
return leds
SystemInfo["12V_Output"] = Misc_Options.getInstance().detected_12V_output()
SystemInfo["ZapMode"] = fileCheck("/proc/stb/video/zapmode") or fileCheck("/proc/stb/video/zapping_mode")
SystemInfo["NumFrontpanelLEDs"] = countFrontpanelLEDs()
SystemInfo["FrontpanelDisplay"] = fileExists("/dev/dbox/oled0") or fileExists("/dev/dbox/lcd0")
SystemInfo["OledDisplay"] = fileExists("/dev/dbox/oled0") or getBoxType() in ('osminiplus')
SystemInfo["LcdDisplay"] = fileExists("/dev/dbox/lcd0")
SystemInfo["FBLCDDisplay"] = fileCheck("/proc/stb/fb/sd_detach")
SystemInfo["VfdDisplay"] = getBoxType() no
|
t in ('vuultimo', 'xpeedlx3', 'et10000', 'mutant2400', 'quadbox2400', 'atemionemesis') and fileExists("/dev/dbox/oled0")
SystemInfo["DeepstandbySupport"] = HardwareInfo().has_deepstandby()
SystemInfo["Fan"] = fileCheck("/proc/stb/fp/fan")
SystemInfo["FanPWM"] = SystemInfo["Fan"] and fileCheck("/proc/stb/fp/fan_pwm")
SystemInfo["StandbyPowerLed"] = fileExists("/proc/stb/
|
power/standbyled")
if getBoxType() in ('gbquad', 'gbquadplus','gb800ueplus', 'gb800seplus', 'gbipbox'):
SystemInfo["WOL"] = False
else:
SystemInfo["WOL"] = fileCheck("/proc/stb/power/wol") or fileCheck("/proc/stb/fp/wol")
SystemInfo["HDMICEC"] = (fileExists("/dev/hdmi_cec") or fileExists("/dev/misc/hdmi_cec0")) and fileExists("/usr/lib/enigma2/python/Plugins/SystemPlugins/HdmiCEC/plugin.pyo")
SystemInfo["SABSetup"] = fileExists("/usr/lib/enigma2/python/Plugins/SystemPlugins/SABnzbd/plugin.pyo")
SystemInfo["SeekStatePlay"] = False
SystemInfo["GraphicLCD"] = getBoxType() in ('vuultimo', 'xpeedlx3', 'et10000', 'mutant2400', 'quadbox2400', 'atemionemesis')
SystemInfo["Blindscan"] = fileExists("/usr/lib/enigma2/python/Plugins/SystemPlugins/Blindscan/plugin.pyo")
SystemInfo["Satfinder"] = fileExists("/usr/lib/enigma2/python/Plugins/SystemPlugins/Satfinder/plugin.pyo")
SystemInfo["HasExternalPIP"] = getMachineBuild() not in ('et9x00', 'et6x00', 'et5x00') and fileCheck("/proc/stb/vmpeg/1/external")
SystemInfo["hasPIPVisibleProc"] = fileCheck("/proc/stb/vmpeg/1/visible")
SystemInfo["VideoDestinationConfigurable"] = fileExists("/proc/stb/vmpeg/0/dst_left")
SystemInfo["GBWOL"] = fileExists("/usr/bin/gigablue_wol")
SystemInfo["LCDSKINSetup"] = path.exists("/usr/share/enigma2/display")
SystemInfo["CIHelper"] = fileExists("/usr/bin/cihelper")
SystemInfo["isGBIPBOX"] = fileExists("/usr/lib/enigma2/python/gbipbox.so")
SystemInfo["HaveMultiBoot"] = fileCheck("/boot/STARTUP") or fileCheck("/boot/STARTUP_1")
SystemInfo["HaveCISSL"] = fileCheck("/etc/ssl/certs/customer.pem") and fileCheck("/etc/ssl/certs/device.pem")
SystemInfo["LCDMiniTV"] = fileExists("/proc/stb/lcd/mode")
SystemInfo["LCDMiniTV4k"] = fileExists("/proc/stb/lcd/live_enable")
SystemInfo["LCDMiniTVPiP"] = SystemInfo["LCDMiniTV"] and getBoxType() != 'gb800ueplus'
SystemInfo["LcdLiveTV"] = fileCheck("/proc/stb/fb/sd_detach")
SystemInfo["HaveTouchSensor"] = getBoxType() in ('dm520', 'dm525', 'dm900')
SystemInfo["DefaultDisplayBrightness"] = getBoxType() == 'dm900' and 8 or 5
SystemInfo["RecoveryMode"] = fileCheck("/proc/stb/fp/boot_mode")
|
Loudr/pale
| pale/fields/timestamp.py
| Python
| mit
| 189
| 0.005291
|
from pale.fields.string import StringField
class TimestampField(StringField):
    """A field for timestamp strings."""
value_type = 'timestamp'
# TODO - timestamp field rendering
|
jlongever/redfish-client-python
| on_http_redfish_1_0/models/thermal_1_0_0_temperature.py
| Python
| apache-2.0
| 8,295
| 0.00217
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class Thermal100Temperature(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Thermal100Temperature - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'member_id': 'str',
'oem': 'ResourceOem',
            'physical_context': 'PhysicalContext100PhysicalContext',
'related_item': 'list[Odata400IdRef]',
'related_itemodata_count': 'Odata400Count',
'related_itemodata_navigation_link': 'Odata400IdRef',
'status': 'ResourceStatus'
}
self.attribute_map = {
'member_id': 'MemberId',
'oem': 'Oem',
'physical_context': 'PhysicalContext',
'related_item': 'RelatedItem',
'related_itemodata_count': 'RelatedItem@odata.count',
'related_itemodata_navigation_link': 'RelatedItem@odata.navigationLink',
'status': 'Status'
}
self._member_id = None
self._oem = None
self._physical_context = None
self._related_item = None
self._related_itemodata_count = None
self._related_itemodata_navigation_link = None
self._status = None
@property
def member_id(self):
"""
Gets the member_id of this Thermal100Temperature.
This is the identifier for the member within the collection.
:return: The member_id of this Thermal100Temperature.
:rtype: str
"""
return self._member_id
@member_id.setter
def member_id(self, member_id):
"""
Sets the member_id of this Thermal100Temperature.
This is the identifier for the member within the collection.
:param member_id: The member_id of this Thermal100Temperature.
:type: str
"""
self._member_id = member_id
@property
def oem(self):
"""
Gets the oem of this Thermal100Temperature.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:return: The oem of this Thermal100Temperature.
:rtype: ResourceOem
"""
return self._oem
@oem.setter
def oem(self, oem):
"""
Sets the oem of this Thermal100Temperature.
This is the manufacturer/provider specific extension moniker used to divide the Oem object into sections.
:param oem: The oem of this Thermal100Temperature.
:type: ResourceOem
"""
self._oem = oem
@property
def physical_context(self):
"""
Gets the physical_context of this Thermal100Temperature.
Describes the area or device to which this temperature measurement applies.
:return: The physical_context of this Thermal100Temperature.
:rtype: PhysicalContext100PhysicalContext
"""
return self._physical_context
@physical_context.setter
def physical_context(self, physical_context):
"""
Sets the physical_context of this Thermal100Temperature.
Describes the area or device to which this temperature measurement applies.
:param physical_context: The physical_context of this Thermal100Temperature.
:type: PhysicalContext100PhysicalContext
"""
self._physical_context = physical_context
@property
def related_item(self):
"""
Gets the related_item of this Thermal100Temperature.
Describes the areas or devices to which this temperature measurement applies.
:return: The related_item of this Thermal100Temperature.
:rtype: list[Odata400IdRef]
"""
return self._related_item
@related_item.setter
def related_item(self, related_item):
"""
Sets the related_item of this Thermal100Temperature.
Describes the areas or devices to which this temperature measurement applies.
:param related_item: The related_item of this Thermal100Temperature.
:type: list[Odata400IdRef]
"""
self._related_item = related_item
@property
def related_itemodata_count(self):
"""
Gets the related_itemodata_count of this Thermal100Temperature.
:return: The related_itemodata_count of this Thermal100Temperature.
:rtype: Odata400Count
"""
return self._related_itemodata_count
@related_itemodata_count.setter
def related_itemodata_count(self, related_itemodata_count):
"""
Sets the related_itemodata_count of this Thermal100Temperature.
:param related_itemodata_count: The related_itemodata_count of this Thermal100Temperature.
:type: Odata400Count
"""
self._related_itemodata_count = related_itemodata_count
@property
def related_itemodata_navigation_link(self):
"""
Gets the related_itemodata_navigation_link of this Thermal100Temperature.
:return: The related_itemodata_navigation_link of this Thermal100Temperature.
:rtype: Odata400IdRef
"""
return self._related_itemodata_navigation_link
@related_itemodata_navigation_link.setter
def related_itemodata_navigation_link(self, related_itemodata_navigation_link):
"""
Sets the related_itemodata_navigation_link of this Thermal100Temperature.
:param related_itemodata_navigation_link: The related_itemodata_navigation_link of this Thermal100Temperature.
:type: Odata400IdRef
"""
self._related_itemodata_navigation_link = related_itemodata_navigation_link
@property
def status(self):
"""
Gets the status of this Thermal100Temperature.
:return: The status of this Thermal100Temperature.
:rtype: ResourceStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this Thermal100Temperature.
:param status: The status of this Thermal100Temperature.
:type: ResourceStatus
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
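# --- Editor's note: a round-trip sketch for the generated model above (not
# part of the original source).
#
#   t = Thermal100Temperature()
#   t.member_id = '0'
#   t.to_dict()    # -> {'member_id': '0', 'oem': None, ...}
#   print(t)       # __repr__ pretty-prints the same dict via pformat()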
|
cryptickp/heat
| heat/engine/clients/os/keystone.py
| Python
| apache-2.0
| 5,676
| 0
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import exceptions
from heat.common import exception
from heat.common import heat_keystoneclient as hkc
from heat.engine.clients import client_plugin
from heat.engine import constraints
class KeystoneClientPlugin(client_plugin.ClientPlugin):
exceptions_module = exceptions
service_types = [IDENTITY] = ['identity']
def _create(self):
return hkc.KeystoneClient(self.context)
def is_not_found(self, ex):
return isinstance(ex, exceptions.NotFound)
def is_over_limit(self, ex):
return isinstance(ex, exceptions.RequestEntityTooLarge)
def is_conflict(self, ex):
return isinstance(ex, exceptions.Conflict)
def get_role_id(self, role):
try:
role_obj = self.client().client.roles.get(role)
return role_obj.id
except exceptions.NotFound:
role_list = self.client().client.roles.list(name=role)
for role_obj in role_list:
if role_obj.name == role:
return role_obj.id
raise exception.EntityNotFound(entity='KeystoneRole', name=role)
def get_project_id(self, project):
try:
project_obj = self.client().client.projects.get(project)
return project_obj.id
except exceptions.NotFound:
project_list = self.client().client.projects.list(name=project)
for project_obj in project_list:
if project_obj.name == project:
return project_obj.id
raise exception.EntityNotFound(entity='KeystoneProject',
name=project)
def get_domain_id(self, domain):
try:
domain_obj = self.client().client.domains.get(domain)
return domain_obj.id
except exceptions.NotFound:
domain_list = self.client().client.domains.list(name=domain)
for domain_obj in domain_list:
if domain_obj.name == domain:
return domain_obj.id
raise exception.EntityNotFound(entity='KeystoneDomain', name=domain)
def get_group_id(self, group):
try:
group_obj = self.client().client.groups.get(group)
return group_obj.id
except exceptions.NotFound:
group_list = self.client().client.groups.list(name=group)
for group_obj in group_list:
if group_obj.name == group:
return group_obj.id
raise exception.EntityNotFound(entity='KeystoneGroup', name=group)
def get_service_id(self, service):
try:
service_obj = self.client().client.services.get(service)
return service_obj.id
except exceptions.NotFound:
service_list = self.client().client.services.list(name=service)
if len(service_list) == 1:
return service_list[0].id
elif len(service_list) > 1:
raise exception.KeystoneServiceNameConflict(service=service)
else:
raise exception.EntityNotFound(entity='KeystoneService',
name=service)
def get_user_id(self, user):
try:
user_obj = self.client().client.users.get(user)
return user_obj.id
except exceptions.NotFound:
user_list = self.client().client.users.list(name=user)
for user_obj in user_list:
if user_obj.name == user:
return user_obj.id
raise exception.EntityNotFound(entity='KeystoneUser', name=user)
class KeystoneRoleConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exception.EntityNotFound,)
def validate_with_client(self, client, role):
client.client_plugin('keystone').get_role_id(role)
class KeystoneDomainConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exception.EntityNotFound,)
def validate_with_client(self, client, domain):
client.client_plugin('keystone').get_domain_id(domain)
class KeystoneProjectConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exception.EntityNotFound,)
def validate_with_client(self, client, project):
client.client_plugin('keystone').get_project_id(project)
class KeystoneGroupConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exception.EntityNotFound,)
def validate_with_client(self, client, group):
client.client_plugin('keystone').get_group_id(group)
class KeystoneServiceConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exception.EntityNotFound,
exception.KeystoneServiceNameConflict,)
def validate_with_client(self, client, service):
client.client_plugin('keystone').get_service_id(service)
class KeystoneUserConstraint(constraints.BaseCustomConstraint):
expected_exceptions = (exception.EntityNotFound,)
def validate_with_client(self, client, user):
client.client_plugin('keystone').get_user_id(user)
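# --- Editor's note: a hedged sketch of constraint resolution (not part of
# the original source). `clients` stands for the clients object heat passes
# to validate_with_client(); 'admin' is an example role name.
#
#   KeystoneRoleConstraint().validate_with_client(clients, 'admin')
#   # succeeds iff get_role_id() resolves the name; EntityNotFound is listed
#   # in expected_exceptions and reported as a validation failure.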
|
anhstudios/swganh
| data/scripts/templates/object/static/naboo/shared_waterfall_naboo_falls_01.py
| Python
| mit
| 450
| 0.046667
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
    result.template = "object/static/naboo/shared_waterfall_naboo_falls_01.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
jart/tensorflow
| tensorflow/contrib/autograph/core/errors.py
| Python
| apache-2.0
| 10,511
| 0.007326
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Error rewriting logic.
Contains the functions responsible for rewriting tracebacks of errors raised
in AutoGraph (AG) code to refer to user written code, so that errors only refer
to the original user code.
When 'user code' is used in comments it refers to the original source code that
the user wrote and is converting using AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import logging
import sys
import traceback
from tensorflow.contrib.autograph.pyct.origin_info import CodeLocation
from tensorflow.python.framework import errors_impl
from tensorflow.python.util import tf_inspect
class GraphConstructionError(Exception):
"""Error for graph construction errors from AutoGraph generated code."""
def __init__(self, original_error, custom_traceback):
self.original_error = original_error
self.custom_traceback = custom_traceback
super(GraphConstructionError, self).__init__()
def __str__(self):
traceback_str = ''.join(traceback.format_list(self.custom_traceback))
return ('Traceback (most recent call last):\n' + traceback_str + '\n' + str(
self.original_error) + '\n')
class TfRuntimeError(Exception):
"""Error wrapper for runtime errors raised by AutoGraph generated code."""
def __init__(self, op_name, op_message, custom_traceback):
self.op_name = op_name
self.op_message = op_message
self.custom_traceback = custom_traceback
super(TfRuntimeError, self).__init__()
def __str__(self):
message = '%s\n\nCaused by op %r, defined at:\n' % (self.op_message,
self.op_name)
return message + ''.join(traceback.format_list(self.custom_traceback))
def _rewrite_frame(source_map, cleaned_traceback, stack_frame_indices):
"""Rewrites the stack frames at the given indices using the given source map.
Args:
source_map: Dict[CodeLocation, OriginInfo], a mapping between the user and
AG generated code.
cleaned_traceback: List[Tuple[text, text, text, text]], the current
traceback.
stack_frame_indices: Iterable[Int], frame indices to possibly rewrite if
there are matching source mapping keys.
Returns:
None
"""
for frame_index in stack_frame_indices:
# (file_path, line number, function name, code)
file_path, line_number, _, _ = cleaned_traceback[frame_index]
source_map_key = CodeLocation(file_path=file_path, line_number=line_number)
found_mapping = source_map_key in source_map
if found_mapping:
cleaned_traceback[frame_index] = source_map[source_map_key].as_frame()
# TODO(znado): Make more robust to name changes in the rewriting logic.
def _remove_rewrite_frames(tb):
"""Remove stack frames containing the error rewriting logic."""
cleaned_tb = []
for f in tb:
    if 'ag__.rewrite_graph_construction_error' not in f[3]:
      cleaned_tb.append(f)
return cleaned_tb
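# --- Editor's note: a toy illustration of _rewrite_frame (not part of the
# original source). Traceback frames are (file_path, line_number, func_name,
# code) tuples; `origin_info` stands for the OriginInfo mapped to that key.
#
#   tb = [('gen.py', 12, 'tf__f', 'x = y + 1')]
#   key = CodeLocation(file_path='gen.py', line_number=12)
#   _rewrite_frame({key: origin_info}, tb, [0])
#   # tb[0] is now origin_info.as_frame(), i.e. the user's original frame.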
def rewrite_graph_construction_error(source_map):
"""Rewrites errors raised by non-AG APIs inside AG generated code.
Meant to be called from the try/except block inside each AutoGraph generated
function. Only rewrites the traceback frames corresponding to the function
that this is called from. When we raise a GraphConstructionError at the end
it is then caught by calling functions, where they can be responsible for
rewriting their own frames.
Args:
source_map: Dict[CodeLocation, OriginInfo], a mapping between the user and
AG generated code.
Raises:
GraphConstructionError: The rewritten underlying error.
Exception: The underlying error, if it could not be rewritten.
"""
error_info = sys.exc_info()
_, original_error, e_traceback = error_info
assert original_error is not None
try:
_, _, _, func_name, _, _ = tf_inspect.stack()[1]
# The latest function call is added to the beginning of a traceback, but
# when rewriting the traceback of multiple function calls the previous
# functions' except blocks may have already rewritten their own frames so
# we want to copy over all of the previous frames. We may have rewritten
# previous frames only if the error is a GraphConstructionError.
if isinstance(original_error, GraphConstructionError):
cleaned_traceback = traceback.extract_tb(e_traceback)
previous_traceback = original_error.custom_traceback
cleaned_traceback = [cleaned_traceback[0]] + previous_traceback
else:
cleaned_traceback = traceback.extract_tb(e_traceback)
cleaned_traceback = _remove_rewrite_frames(cleaned_traceback)
current_frame_indices = []
# This code is meant to be called from the try/except block that wraps a
# function body. Here we look for all frames that came from the function
# that this wraps, look for any matching line numbers in the source
# mapping, and then rewrite them if matches are found.
for fi, frame in enumerate(cleaned_traceback):
_, _, frame_func_name, _ = frame
if frame_func_name == func_name:
current_frame_indices.append(fi)
break
if current_frame_indices:
_rewrite_frame(source_map, cleaned_traceback, current_frame_indices)
if isinstance(original_error, GraphConstructionError):
original_error.custom_traceback = cleaned_traceback
new_error = original_error
else:
new_error = GraphConstructionError(original_error, cleaned_traceback)
except Exception:
logging.exception('Error while rewriting AutoGraph error:')
raise original_error
else:
raise new_error
finally:
# Addresses warning https://docs.python.org/2/library/sys.html#sys.exc_info.
del e_traceback
def rewrite_tf_runtime_error(error, source_map):
"""Rewrites TensorFlow runtime errors raised by ops created in AG code.
Args:
error: error_impl.OpError, an TensorFlow error that will have its traceback
rewritten.
source_map: Dict[CodeLocation, OriginInfo], a mapping between the user and
AG generated code.
Returns:
A TfRuntimeError with a traceback rewritten according to the given
source mapping.
"""
# Check for cases where we leave a user method and re-enter it in the
# traceback. This is done by looking at the function names when the
# filenames are from any files the user code is in. If we find a case where
# we return to a user method after leaving it then we cut out the frames in
# between because we assume this means these in between frames are from
# internal AutoGraph code that shouldn't be included.
#
# An example of this is:
#
# File "file1.py", line 57, in my_func
# ...
# File "control_flow_ops.py", line 231, in cond
# ...
# File "control_flow_ops.py", line 1039, in inner_cond
# ...
# File "file1.py", line 68, in my_func
# ...
#
# Where we would remove the control_flow_ops.py frames because we re-enter
# my_func in file1.py.
#
# The source map keys are (file_path, line_number) so get the set of all user
# file_paths.
try:
all_user_files = set(k.file_path for k in source_map)
cleaned_traceback = []
last_user_frame_index = None
last_user_user_file_path = None
last_user_user_fn_name = None
for fi, frame in enumerate(error.op.traceback):
frame_file_path, frame_line_number, _, _ = frame
src_map_key = CodeLocation(
          file_path=frame_file_path, line_number=frame_line_number)
|
gem/sidd
| utils/xml.py
| Python
| agpl-3.0
| 862
| 0.011601
|
# Copyright (c) 2011-2013, ImageCat Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
XML helper functions
"""
def get_node_attrib(node, attrib_name):
try:
return node.attrib[attrib_name]
except:
return ''
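# --- Editor's note: a minimal usage sketch (not part of the original source).
#
#   from xml.etree import ElementTree
#   node = ElementTree.fromstring('<grid res="30"/>')
#   get_node_attrib(node, 'res')      # -> '30'
#   get_node_attrib(node, 'missing')  # -> '' rather than a KeyError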
|
kawamon/hue
| desktop/core/ext-py/pyasn1-modules-0.2.6/tools/crldump.py
| Python
| apache-2.0
| 1,086
| 0.001842
|
#!/usr/bin/env python
#
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
# Read X.509 CRL on stdin, print them pretty and encode back into
# original wire format.
# CRL can be generated with "openssl ca -gencrl ..." commands.
#
import sys
from pyasn1.codec.der import decoder
from pyasn1.codec.der import encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc2459
if len(sys.argv) != 1:
print(""
|
"Usage:
$ cat crl.pem | %s""" % sys.argv[0])
sys.exit(-1)
asn1Spec = rfc2459.CertificateList()
cnt = 0
while True:
idx, substrate = pem.readPemBlocksFromFile(sys.stdin, ('-----BEGIN X509 CRL-----', '-----END X509 CRL-----'))
if not substrate:
break
key, rest = decoder.decode(substrate, asn1Spec=asn1Spec)
if rest:
substrate = substrate[:-len(rest)]
print(key.prettyPrint())
    assert encoder.encode(key) == substrate, 'crl recode fails'
cnt += 1
print('*** %s CRL(s) re/serialized' % cnt)
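# --- Editor's note: an illustrative invocation (not part of the original
# source; the CA options are elided).
#
#   $ openssl ca -gencrl -out crl.pem ...
#   $ cat crl.pem | python crldump.py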
|
linkcheck/linkchecker
| linkcheck/htmlutil/__init__.py
| Python
| gpl-2.0
| 770
| 0
|
# Copyright (C) 2008-2014 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
HTML utils
"""
|
matk86/pymatgen
| pymatgen/phonon/plotter.py
| Python
| mit
| 15,693
| 0.000828
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import logging
from collections import OrderedDict
import numpy as np
from monty.json import jsanitize
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
from pymatgen.util.plotting import pretty_plot
from pymatgen.electronic_structure.plotter import plot_brillouin_zone
"""
This module implements plotter for DOS and band structure.
"""
logger = logging.getLogger(__name__)
class PhononDosPlotter(object):
"""
Class for plotting phonon DOSs. Note that the interface is extremely flexible
given that there are many different ways in which people want to view
DOS. The typical usage is::
# Initializes plotter with some optional args. Defaults are usually
# fine,
plotter = PhononDosPlotter()
# Adds a DOS with a label.
plotter.add_dos("Total DOS", dos)
# Alternatively, you can add a dict of DOSs. This is the typical
# form returned by CompletePhononDos.get_element_dos().
Args:
stack: Whether to plot the DOS as a stacked area graph
key_sort_func: function used to sort the dos_dict keys.
            sigma: A float specifying a standard deviation for Gaussian smearing
                the DOS for nicer looking plots. Defaults to None for no
                smearing.
"""
def __init__(self, stack=False, sigma=None):
self.stack = stack
self.sigma = sigma
self._doses = OrderedDict()
def add_dos(self, label, dos):
"""
Adds a dos for plotting.
        Args:
            label:
label for the DOS. Must be unique.
dos:
PhononDos object
"""
densities = dos.get_smeared_densities(self.sigma) if self.sigma \
else dos.densities
self._doses[label] = {'frequencies': dos.frequencies, 'densities': densities}
def add_dos_dict(self, dos_dict, key_sort_func=None):
"""
Add a dictionary of doses, with an optional sorting function for the
keys.
Args:
dos_dict: dict of {label: Dos}
key_sort_func: function used to sort the dos_dict keys.
"""
if key_sort_func:
keys = sorted(dos_dict.keys(), key=key_sort_func)
else:
keys = dos_dict.keys()
for label in keys:
self.add_dos(label, dos_dict[label])
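    # --- Editor's note: a short usage sketch for add_dos_dict (not part of
    # the original source). `complete_dos` stands for a CompletePhononDos
    # instance; sorting by atomic number is one possible key function.
    #
    #   plotter.add_dos_dict(complete_dos.get_element_dos(),
    #                        key_sort_func=lambda el: el.Z)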
def get_dos_dict(self):
"""
Returns the added doses as a json-serializable dict. Note that if you
have specified smearing for the DOS plot, the densities returned will
be the smeared densities, not the original densities.
Returns:
Dict of dos data. Generally of the form, {label: {'frequencies':..,
'densities': ...}}
"""
return jsanitize(self._doses)
def get_plot(self, xlim=None, ylim=None):
"""
Get a matplotlib plot showing the DOS.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
import prettyplotlib as ppl
from prettyplotlib import brewer2mpl
ncolors = max(3, len(self._doses))
ncolors = min(9, ncolors)
colors = brewer2mpl.get_map('Set1', 'qualitative', ncolors).mpl_colors
y = None
alldensities = []
allfrequencies = []
plt = pretty_plot(12, 8)
# Note that this complicated processing of frequencies is to allow for
# stacked plots in matplotlib.
for key, dos in self._doses.items():
frequencies = dos['frequencies']
densities = dos['densities']
if y is None:
y = np.zeros(frequencies.shape)
if self.stack:
y += densities
newdens = y.copy()
else:
newdens = densities
allfrequencies.append(frequencies)
alldensities.append(newdens)
keys = list(self._doses.keys())
keys.reverse()
alldensities.reverse()
allfrequencies.reverse()
allpts = []
for i, (key, frequencies, densities) in enumerate(zip(keys, allfrequencies, alldensities)):
allpts.extend(list(zip(frequencies, densities)))
if self.stack:
plt.fill(frequencies, densities, color=colors[i % ncolors],
label=str(key))
else:
ppl.plot(frequencies, densities, color=colors[i % ncolors],
label=str(key), linewidth=3)
if xlim:
plt.xlim(xlim)
if ylim:
plt.ylim(ylim)
else:
xlim = plt.xlim()
relevanty = [p[1] for p in allpts
if xlim[0] < p[0] < xlim[1]]
plt.ylim((min(relevanty), max(relevanty)))
ylim = plt.ylim()
plt.plot([0, 0], ylim, 'k--', linewidth=2)
plt.xlabel('Frequencies (THz)')
plt.ylabel('Density of states')
plt.legend()
leg = plt.gca().get_legend()
ltext = leg.get_texts() # all the text.Text instance in the legend
plt.setp(ltext, fontsize=30)
plt.tight_layout()
return plt
def save_plot(self, filename, img_format="eps", xlim=None, ylim=None):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(xlim, ylim)
plt.savefig(filename, format=img_format)
def show(self, xlim=None, ylim=None):
"""
Show the plot using matplotlib.
Args:
xlim: Specifies the x-axis limits. Set to None for automatic
determination.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(xlim, ylim)
plt.show()
class PhononBSPlotter(object):
"""
Class to plot or get data to facilitate the plot of band structure objects.
Args:
bs: A BandStructureSymmLine object.
"""
def __init__(self, bs):
if not isinstance(bs, PhononBandStructureSymmLine):
raise ValueError(
"PhononBSPlotter only works with PhononBandStructureSymmLine objects. "
"A PhononBandStructure object (on a uniform grid for instance and "
"not along symmetry lines won't work)")
self._bs = bs
self._nb_bands = self._bs.nb_bands
def _maketicks(self, plt):
"""
utility private method to add ticks to a band structure
"""
ticks = self.get_ticks()
# Sanitize only plot the uniq values
uniq_d = []
uniq_l = []
temp_ticks = list(zip(ticks['distance'], ticks['label']))
for i in range(len(temp_ticks)):
if i == 0:
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Adding label {l} at {d}".format(
l=temp_ticks[i][0], d=temp_ticks[i][1]))
else:
if temp_ticks[i][1] == temp_ticks[i - 1][1]:
logger.debug("Skipping label {i}".format(
i=temp_ticks[i][1]))
else:
logger.debug("Adding label {l} at {d}".format(
l=temp_ticks[i][0], d=temp_ticks[i][1]))
uniq_d.append(temp_ticks[i][0])
uniq_l.append(temp_ticks[i][1])
logger.debug("Unique labels are %s" % list(zip(uniq_d, uniq_l)))
plt.gca().set_xticks(uniq_d)
plt.gca().set_xticklabels(uniq_l)
for i in range(len(ticks['label'])):
if ticks['label'][i] is not None:
                # don't print the same label twice
|
aleontiev/django-cli
| djay/blueprints/init/context.py
| Python
| mit
| 1,263
| 0.001584
|
import click
import inflection
import os
def default(txt):
return click.style(txt, fg="white", bold=True)
def prompt(txt):
return click.style(txt, fg="green")
@click.command()
@click.argument("name")
@click.option("--description", prompt=prompt("Description"), default=default("N/A"))
@click.option(
"--author",
prompt=prompt("Author name"),
default=lambda: default(os.environ.get("USER", "")),
)
@click.option(
"--email",
prompt=prompt("Author email"),
default=lambda: default(os.environ.get("USER", "") + "@me.com"),
)
@click.option("--version", prompt=prompt("Version"), default=default("0.0.1"))
@click.option(
"--django-version", prompt=prompt("Django version"), default=default("1.10")
)
def get_context(name, description, author, email, version, django_version):
    name = click.unstyle(name)
description = click.unstyle(description)
email = click.unstyle(email)
author = click.unstyle(author)
version = click.unstyle(version)
django_version = click.unstyle(django_version)
return {
"app": inflection.underscore(name),
"description": description,
"author": author,
"email": email,
"version": version,
"django_versi
|
on": django_version,
}
|
aronsky/home-assistant
| tests/components/nut/util.py
| Python
| apache-2.0
| 1,446
| 0.002075
|
"""Tests for the nut integration."""
import json
from unittest.mock import MagicMock, patch
from homeassistant.components.nut.const import DOMAIN
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_RESOURCES
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry, load_fixture
def _get_mock_pynutclient(list_vars=None, list_ups=None):
    pynutclient = MagicMock()
    type(pynutclient).list_ups = MagicMock(return_value=list_ups)
    type(pynutclient).list_vars = MagicMock(return_value=list_vars)
return pynutclient
async def async_init_integration(
hass: HomeAssistant, ups_fixture: str, resources: list, add_options: bool = False
) -> MockConfigEntry:
"""Set up the nexia integration in Home Assistant."""
ups_fixture = f"nut/{ups_fixture}.json"
list_vars = json.loads(load_fixture(ups_fixture))
mock_pynut = _get_mock_pynutclient(list_ups={"ups1": "UPS 1"}, list_vars=list_vars)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "mock", CONF_PORT: "mock", CONF_RESOURCES: resources},
options={CONF_RESOURCES: resources} if add_options else {},
)
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
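# --- Editor's note: a hedged sketch of a test built on the helper above (not
# part of the original source; the fixture name and resource are placeholders).
#
#   async def test_ups_sensor(hass):
#       entry = await async_init_integration(hass, "some_ups", ["battery.charge"])
#       assert entry.entry_id is not None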
|
Linhua-Sun/p4-phylogenetics
| p4/STMcmc.py
| Python
| gpl-2.0
| 122,106
| 0.005135
|
# This is STMcmc, for super tree mcmc.
# Started 18 March 2011, first commit 22 March 2011.
import pf,func
from Var import var
import math,random,string,sys,time,copy,os,cPickle,types,glob
import numpy as np
from Glitch import Glitch
from TreePartitions import TreePartitions
from Constraints import Constraints
from Tree import Tree
import datetime
import itertools
try:
import bitarray
except ImportError:
pass
def choose(n, k):
"""
A fast way to calculate binomial coefficients
by Andrew Dalke (contrib).
"""
if 0 <= k <= n:
ntok = 1
ktok = 1
for t in xrange(1, min(k, n - k) + 1):
ntok *= n
ktok *= t
n -= 1
return ntok // ktok
else:
return 0
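# --- Editor's note: worked examples for choose() (not part of the original
# source).
#
#   choose(5, 2)   # == 10, the number of 2-element subsets of a 5-set
#   choose(4, 7)   # == 0, since k > n fails the 0 <= k <= n guard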
# def nSplits(n):
# mySum = 0
# for k in range(2, n-1):
# mySum += choose(n-1, k)
# return mySum
def bForN(n):
# This is the log version of this function. The max diff (in
# log(result)) between this and the non-log function seems to be
# about 2.5e-10 for n up to 10000.
prodLog = 0.0
if n > 3:
for k in range(4, n + 1):
prodLog += math.log((2 * k) - 5)
return prodLog
def BS2009_Eqn30_ZTApprox(n, beta, cT):
    # This log version of this function differs from the non-log
    # version (in log(result)) by at most 6.82e-13 for n up to 150,
# over a wide range of beta (0.001 -- 1000) and cT (2 -- n/2)
myLambda = cT/(2.0*n)
|
tester = 0.5 * math.log((n - 3.)/myLambda)
epsilon = math.exp(-2. * beta)
bigANEpsilon = 1 + (((2. * n) - 3.) * epsilon) + (2. * ((n * n) - (4. * n) - 6.) * epsilon * epsilon)
termA = math.log(bigANEpsilon + 6 * cT * epsilon * epsilon)
if beta < tester:
termB = -(2. * beta) * (n - 3.) + (myLambda * (math.exp(2. * beta) - 1.))
termB += bForN(n)
if termA > termB:
return termA
else:
return termB
else:
return termA
def popcountA(k, nBits):
count = 0
for i in range(nBits):
tester = 1L << i
if tester > k:
return count
if tester & k:
count += 1
return count
def bitReduce(bk, txBits, lLen, sLen, allOnes):
#print "bitReduce: bk %i, txBits %i, lLen %i, sLen %i, allOnes %i" % (bk, txBits, lLen, sLen, allOnes)
newBk = 0L
counter = 0
pops = 0
for pos in range(lLen):
tester = 1L << pos
#print "pos %2i, tester: %3i" % (pos, tester)
if tester & txBits:
#print " tester & txBits -- True"
if tester & bk:
adder = 1L << counter
#print " adding:", adder
newBk += adder
pops += 1
else:
#print " not adding"
pass
counter += 1
if (1 & newBk):
#print "flipping"
newBk = allOnes ^ newBk
pops = sLen - pops
#print "returning newBk %i, pops %i" % (newBk, pops)
return newBk, pops
if 0: # test bitReduce
sk = 6 # always at least 2 bits, even
txBits = 30
lLen = 5
sLen = 4
allOnes = 15
print " sk: %3i %s" % (sk, func.getSplitStringFromKey(sk, lLen))
print "taxBits: %3i %s" % (txBits, func.getSplitStringFromKey(txBits, lLen))
rsk, popcount = bitReduce(sk, txBits, lLen, sLen, allOnes)
print " rsk: %3i %s" % (rsk, func.getSplitStringFromKey(rsk, sLen))
print " popcount %i" % popcount
# sk: 6 .**..
# taxBits: 30 .****
# rsk: 12 ..**
# popcount 2
def maskedSymmetricDifference(skk, skSet, taxBits, longLen, shortLen, allOnes):
if 0:
print "-" * 50
print "skk (skk_ppy1 from the current supertree)"
for sk in skk:
print func.getSplitStringFromKey(sk, longLen)
print "skSet (from input tree)"
for sk in skSet:
print func.getSplitStringFromKey(sk, shortLen)
print "taxBits:", taxBits, func.getSplitStringFromKey(taxBits, longLen)
newSkk = []
for sk in skk:
reducedSk, popcount = bitReduce(sk, taxBits, longLen, shortLen, allOnes)
if 0:
print "taxBits: %s " % func.getSplitStringFromKey(taxBits, longLen),
print "%4i %s " % (sk, func.getSplitStringFromKey(sk, longLen)),
print "%4i %s %i" % (reducedSk, func.getSplitStringFromKey(reducedSk, shortLen), popcount)
if popcount <= 1 or popcount >= (shortLen - 1):
pass
else:
newSkk.append(reducedSk)
newSkkSet = set(newSkk)
#print newSkkSet, skSet
#print "reduced supertree splits = newSkkSet = %s" % newSkkSet
ret = len(newSkkSet.symmetric_difference(skSet))
#print "symmetric difference %i" % ret
nCherries = 0
for sk in newSkkSet:
popcount = popcountA(sk, shortLen)
if popcount == 2:
nCherries += 1
if popcount == (shortLen - 2): # not "elif", because they might both be True
nCherries += 1
#print "nCherries %i" % nCherries
return ret, nCherries
def slowQuartetDistance(st, inputTree):
dst = st.dupe()
toRemove = []
for n in dst.iterLeavesNoRoot():
if n.name not in inputTree.taxNames:
toRemove.append(n)
for n in toRemove:
dst.removeNode(n)
qd = dst.topologyDistance(inputTree, metric='scqdist')
return qd
class STChain(object):
def __init__(self, aSTMcmc):
gm = ['STChain.__init__()']
self.stMcmc = aSTMcmc
self.tempNum = -1 # 'temp'erature, not 'temp'orary
self.curTree = aSTMcmc.tree.dupe()
self.propTree = aSTMcmc.tree.dupe()
self.logProposalRatio = 0.0
self.logPriorRatio = 0.0
self.frrf = None
self.nInTreeSplits = 0
if self.stMcmc.modelName.startswith('SR2008_rf'):
self.curTree.beta = self.stMcmc.beta
self.propTree.beta = self.stMcmc.beta
if self.stMcmc.stRFCalc == 'purePython1':
self.getTreeLogLike_ppy1()
elif self.stMcmc.stRFCalc == 'fastReducedRF':
self.startFrrf()
self.getTreeLogLike_fastReducedRF()
elif self.stMcmc.stRFCalc == 'bitarray':
self.setupBitarrayCalcs()
self.getTreeLogLike_bitarray()
self.curTree.logLike = self.propTree.logLike
elif self.stMcmc.modelName.startswith('SPA'):
self.curTree.spaQ= self.stMcmc.spaQ
self.propTree.spaQ = self.stMcmc.spaQ
for t in self.stMcmc.trees:
self.nInTreeSplits += len(t.splSet)
#print "Got nInTreeSplits %s" % self.nInTreeSplits
self.setupBitarrayCalcs()
self.getTreeLogLike_spa_bitarray()
self.curTree.logLike = self.propTree.logLike
elif self.stMcmc.modelName.startswith('QPA'):
self.curTree.spaQ= self.stMcmc.spaQ
self.propTree.spaQ = self.stMcmc.spaQ
self.nPossibleQuartets = choose(self.stMcmc.tree.nTax, 4) * 3
self.getTreeLogLike_qpa_slow()
self.curTree.logLike = self.propTree.logLike
else:
gm.append('Unknown modelName %s' % self.stMcmc.modelName)
raise Glitch, gm
if 0:
print "STChain init()"
self.curTree.draw()
print "logLike is %f" % self.curTree.logLike
def getTreeLogLike_qpa_slow(self):
gm = ["STChain.getTreeLogLike_qpa_slow()"]
if self.propTree.spaQ > 1. or self.propTree.spaQ <= 0.0:
gm.append("bad propTree.spaQ value %f" % self.propTree.spaQ)
raise Glitch, gm
for n in self.propTree.iterInternalsPostOrder():
if n == self.propTree.root:
break
n.stSplitKey = n.leftChild.stSplitKey
p = n.leftChild.sibling
while p:
n.stSplitKey |= p.stSplitKey # "or", in-place
p = p.sibling
self.propTree.skk = [n.stSplitKey for n in self.propTree.iterInt
|
toefl/pyotp
|
src/pyotp/hotp.py
|
Python
|
mit
| 1,325
| 0.000755
|
from pyotp.otp import OTP
from pyotp import utils
class HOTP(OTP):
def at(self, count):
"""
Generates the OTP for the given count
@param [Integer] count counter
@returns [Integer] OTP
"""
return self.generate_otp(count)
def verify(self, otp, counter):
"""
Verifies the OTP passed in against the OTP generated for the given counter
@param [String/Integer] otp the OTP to check against
@param [Integer] counter the counter of the OTP
"""
return unicode(otp) == unicode(self.at(counter))
def provisioning_uri(self, name, initial_count=0, issuer_name=None):
"""
Returns the provisioning URI for the OTP
This can then be encoded in a QR Code and used
to provision the Google Authenticator app
@param [String] name of the account
@param [Integer] initial_count starting counter value, defaults to 0
@param [String] the name of the OTP issuer; this will be the
organization title of the OTP entry in Authenticator
@return [String] provisioning uri
"""
return utils.build_uri(
self.secret,
name,
initial_count=initial_count,
issuer_name=issuer_name,
)
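# Hedged usage sketch: the constructor comes from the OTP base class, which
# is not shown here, so the exact signature is assumed; the secret below is
# hypothetical.
#   hotp = HOTP('base32secretkey')
#   first = hotp.at(0)                 # OTP for counter 0
#   hotp.verify(first, 0)              # -> True
#   hotp.provisioning_uri('alice@example.com', initial_count=0)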
|
kraziegent/mysql-5.6
|
xtrabackup/test/kewpie/lib/server_mgmt/drizzled.py
|
Python
|
gpl-2.0
| 9,584
| 0.019825
|
#! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2010,2011 Patrick Crews
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
""" drizzled.py: code to allow a serverManager
to provision and start up a drizzled server object
for test execution
"""
# imports
import os
from lib.server_mgmt.server import Server
class drizzleServer(Server):
""" represents a drizzle server, its possessions
(datadir, ports, etc), and methods for controlling
and querying it
TODO: create a base server class that contains
standard methods from which we can inherit
Currently there are definitely methods / attr
which are general
"""
def __init__( self, name, server_manager, code_tree, default_storage_engine
, server_options, requester, test_executor, workdir_root):
super(drizzleServer, self).__init__( name
, server_manager
, code_tree
, default_storage_engine
, server_options
, requester
, test_executor
, workdir_root)
self.preferred_base_port = 9306
# client files
self.drizzledump = self.code_tree.drizzledump
self.drizzle_client = self.code_tree.drizzle_client
self.drizzleimport = self.code_tree.drizzleimport
self.drizzleslap = self.code_tree.drizzleslap
self.server_path = self.code_tree.drizzle_server
self.drizzle_client_path = self.code_tree.drizzle_client
self.schemawriter = self.code_tree.schemawriter
self.trx_reader = self.code_tree.trx_reader
# Get our ports
self.port_block = self.system_manager.port_manager.get_port_block( self.name
, self.preferred_base_port
, 6 )
self.master_port = self.port_block[0]
self.drizzle_tcp_port = self.port_block[1]
self.mc_port = self.port_block[2]
self.pbms_port = self.port_block[3]
self.rabbitmq_node_port = self.port_block[4]
self.json_server_port = self.port_block[5]
# Generate our working directories
self.dirset = {'var_%s' %(self.name): {'std_data_ln':( os.path.join(self.code_tree.testdir,'std_data'))
,'log':None
,'run':None
,'tmp':None
,'master-data': {'local': { 'test':None
, 'mysql':None
}
}
}
}
self.workdir = self.system_manager.create_dirset( workdir_root
, self.dirset)
self.vardir = self.workdir
self.tmpdir = os.path.join(self.vardir,'tmp')
self.rundir = os.path.join(self.vardir,'run')
self.logdir = os.path.join(self.vardir,'log')
self.datadir = os.path.join(self.vardir,'master-data')
self.error_log = os.path.join(self.logdir,'error.log')
self.pid_file = os.path.join(self.rundir,('%s.pid' %(self.name)))
self.socket_file = os.path.join(self.vardir, ('%s.sock' %(self.name)))
if len(self.socket_file) > 107:
            # MySQL has a limitation of 107 characters for socket file path
            # we copy the mtr workaround of creating one in /tmp
            self.logging.verbose("Default socket file path: %s" %(self.socket_file))
            self.socket_file = "/tmp/%s_%s.%s.sock" %(self.system_manager.uuid
                                                     ,self.owner
                                                     ,self.name)
self.logging.verbose("Changing to alternate: %s" %(self.socket_file))
self.timer_file = os.path.join(self.logdir,('timer'))
# Do magic to create a config file for use with the slave
# plugin
self.slave_config_file = os.path.join(self.logdir,'slave.cnf')
self.create_slave_config_file()
self.snapshot_path = os.path.join(self.tmpdir,('snapshot_%s' %(self.master_port)))
# We want to use --secure-file-priv = $vardir by default
# but there are times / tools when we need to shut this off
if self.no_secure_file_priv:
self.secure_file_string = ''
else:
self.secure_file_string = "--secure-file-priv='%s'" %(self.vardir)
self.user_string = '--user=root'
self.initialize_databases()
self.take_db_snapshot()
self.logging.debug_class(self)
def report(self):
""" We print out some general useful info """
report_values = [ 'name'
, 'master_port'
, 'drizzle_tcp_port'
, 'mc_port'
, 'pbms_port'
, 'rabbitmq_node_port'
, 'vardir'
, 'status'
]
self.logging.info("%s server:" %(self.owner))
for key in report_values:
value = vars(self)[key]
self.logging.info("%s: %s" %(key.upper(), value))
def get_start_cmd(self):
""" Return the command string that will start up the server
as desired / intended
"""
server_args = [ self.process_server_options()
, "--mysql-protocol.port=%d" %(self.master_port)
, "--mysql-protocol.connect-timeout=60"
, "--innodb.data-file-path=ibdata1:20M:autoextend"
, "--sort-buffer-size=256K"
, "--max-heap-table-size=1M"
, "--mysql-unix-socket-protocol.path=%s" %(self.socket_file)
, "--pid-file=%s" %(self.pid_file)
, "--drizzle-protocol.port=%d" %(self.drizzle_tcp_port)
, "--default-storage-engine=%s" %(self.default_storage_engine)
, "--datadir=%s" %(self.datadir)
, "--tmpdir=%s" %(self.tmpdir)
, self.secure_file_string
, self.user_string
]
if self.gdb:
server_args.append('--gdb')
return self.system_manager.handle_gdb_reqs(self, server_args)
else:
return "%s %s %s & " % ( self.cmd_prefix
, self.server_path
, " ".join(server_args)
)
def get_stop_cmd(self):
""" Return the command that will shut us down """
return "%s --user=root --port=%d --connect-timeout=5 --silent --password= --shutdown " %(self.drizzle_client_path, self.master_port)
def get_ping_cmd(self):
"""Return the command string that will
|
sahiljain/catapult
|
dashboard/dashboard/services/issue_tracker_service.py
|
Python
|
bsd-3-clause
| 8,780
| 0.005353
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides a layer of abstraction for the issue tracker API."""
import json
import logging
from apiclient import discovery
from apiclient import errors
_DISCOVERY_URI = ('https://monorail-prod.appspot.com'
'/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest')
STATUS_DUPLICATE = 'Duplicate'
class IssueTrackerService(object):
"""Class for updating bug issues."""
def __init__(self, http):
"""Initializes an object for adding and updating bugs on the issue tracker.
This object can be re-used to make multiple requests without calling
apiclient.discovery.build multiple times.
This class makes requests to the Monorail API.
API explorer: https://goo.gl/xWd0dX
Args:
http: A Http object that requests will be made through; this should be an
Http object that's already authenticated via OAuth2.
"""
self._service = discovery.build(
'monorail', 'v1', discoveryServiceUrl=_DISCOVERY_URI, http=http)
def AddBugComment(self, bug_id, comment, status=None, cc_list=None,
merge_issue=None, labels=None, owner=None, send_email=True):
"""Adds a comment with the bisect results to the given bug.
Args:
bug_id: Bug ID of the issue to update.
comment: Bisect results information.
status: A string status for bug, e.g. Assigned, Duplicate, WontFix, etc.
cc_list: List of email addresses of users to add to the CC list.
merge_issue: ID of the issue to be merged into; specifying this option
implies that the status should be "Duplicate".
labels: List of labels for bug.
owner: Owner of the bug.
send_email: True to send email to bug cc list, False otherwise.
Returns:
True if successful, False otherwise.
"""
if not bug_id or bug_id < 0:
return False
body = {'content': comment}
updates = {}
# Mark issue as duplicate when relevant bug ID is found in the datastore.
# Avoid marking an issue as duplicate of itself.
if merge_issue and int(merge_issue) != bug_id:
status = STATUS_DUPLICATE
updates['mergedInto'] = merge_issue
logging.info('Bug %s marked as duplicate of %s', bug_id, merge_issue)
if status:
updates['status'] = status
if cc_list:
updates['cc'] = cc_list
if labels:
updates['labels'] = labels
if owner:
updates['owner'] = owner
body['updates'] = updates
return self._MakeCommentRequest(bug_id, body, send_email=send_email)
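  # Hedged usage sketch (bug IDs hypothetical): passing merge_issue forces
  # the status to 'Duplicate' via the logic above.
  #   service.AddBugComment(111, 'Same regression range.', merge_issue=222)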
def List(self, **kwargs):
"""Makes a request to the issue tracker to list bugs."""
request = self._service.issues().list(projectId='chromium', **kwargs)
return self._ExecuteRequest(request)
def GetIssue(self, issue_id):
"""Makes a request to the issue tracker to get an issue."""
request = self._service.issues().get(projectId='chromium', issueId=issue_id)
return self._ExecuteRequest(request)
def _MakeCommentRequest(self, bug_id, body, retry=True, send_email=False):
"""Makes a request to the issue tracker to update a bug.
Args:
bug_id: Bug ID of the issue.
body: Dict of comment parameters.
retry: True to retry on failure, False otherwise.
      send_email: True to send email to bug cc list, False otherwise.
    Returns:
      True if successfully posted a comment or the issue was deleted. False
      if making a comment failed unexpectedly.
    """
    request = self._service.issues().comments().insert(
projectId='chromium',
issueId=bug_id,
sendEmail=send_email,
body=body)
try:
if self._ExecuteRequest(request, ignore_error=False):
return True
except errors.HttpError as e:
reason = _GetErrorReason(e)
# Retry without owner if we cannot set owner to this issue.
if retry and 'The user does not exist' in reason:
_RemoveOwnerAndCC(body)
return self._MakeCommentRequest(bug_id, body, retry=False)
# This error reason is received when issue is deleted.
elif 'User is not allowed to view this issue' in reason:
logging.warning('Unable to update bug %s with body %s', bug_id, body)
return True
logging.error('Error updating bug %s with body %s', bug_id, body)
return False
def NewBug(self, title, description, labels=None, components=None,
owner=None, cc=None):
"""Creates a new bug.
Args:
title: The short title text of the bug.
description: The body text for the bug.
labels: Starting labels for the bug.
components: Starting components for the bug.
owner: Starting owner account name.
cc: CSV of email addresses to CC on the bug.
Returns:
The new bug ID if successfully created, or None.
"""
body = {
'title': title,
'summary': title,
'description': description,
'labels': labels or [],
'components': components or [],
'status': 'Assigned' if owner else 'Untriaged',
}
if owner:
body['owner'] = {'name': owner}
if cc:
body['cc'] = [{'name': account.strip()}
for account in cc.split(',') if account.strip()]
return self._MakeCreateRequest(body)
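  # Hedged usage sketch (all values hypothetical): a bug created with an
  # owner starts as 'Assigned', otherwise as 'Untriaged'.
  #   bug_id = service.NewBug('Perf regression', 'Details...',
  #                           labels=['Type-Bug-Regression'],
  #                           owner='dev@chromium.org', cc='a@x.org,b@x.org')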
def _MakeCreateRequest(self, body):
"""Makes a request to create a new bug.
Args:
body: The request body parameter dictionary.
Returns:
A bug ID if successful, or None otherwise.
"""
request = self._service.issues().insert(
projectId='chromium',
sendEmail=True,
body=body)
logging.info('Making create issue request with body %s', body)
response = self._ExecuteRequest(request)
if response and 'id' in response:
return response['id']
logging.error('Failed to create new bug; response %s', response)
return None
def GetIssueComments(self, bug_id):
"""Gets all the comments for the given bug.
Args:
bug_id: Bug ID of the issue to update.
Returns:
A list of comments
"""
if not bug_id or bug_id < 0:
return None
response = self._MakeGetCommentsRequest(bug_id)
if not response:
return None
return [{
'author': r['author'].get('name'),
'content': r['content'],
'published': r['published']
} for r in response.get('items')]
def GetLastBugCommentsAndTimestamp(self, bug_id):
"""Gets last updated comments and timestamp in the given bug.
Args:
bug_id: Bug ID of the issue to update.
Returns:
A dictionary with last comment and timestamp, or None on failure.
"""
if not bug_id or bug_id < 0:
return None
response = self._MakeGetCommentsRequest(bug_id)
if response and all(v in response.keys()
for v in ['totalResults', 'items']):
bug_comments = response.get('items')[response.get('totalResults') - 1]
if bug_comments.get('content') and bug_comments.get('published'):
return {
'comment': bug_comments.get('content'),
'timestamp': bug_comments.get('published')
}
return None
def _MakeGetCommentsRequest(self, bug_id):
"""Makes a request to the issue tracker to get comments in the bug."""
# TODO (prasadv): By default the max number of comments retrieved in
# one request is 100. Since bisect-fyi jobs may have more than 100
# comments for now we set this maxResults count as 10000.
# Remove this max count once we find a way to clear old comments
# on FYI issues.
request = self._service.issues().comments().list(
projectId='chromium',
issueId=bug_id,
maxResults=10000)
return self._ExecuteRequest(request)
def _ExecuteRequest(self, request, ignore_error=True):
"""Makes a request to the issue tracker.
Args:
request: The request object, which has a execute method.
Returns:
The response if there was one, or else None.
"""
try:
response = request.execute()
return response
except e
|
CharLLCH/jianchi_alimobileR
|
ftrldata/TCReBuild/codes/mylibs/size.py
|
Python
|
gpl-2.0
| 65
| 0
|
item_id = 4986168
user_id = 20000
item_category = 9656
time = 31
|
Princeton-CDH/winthrop-django
|
winthrop/books/migrations/0004_unique_together_sort.py
|
Python
|
apache-2.0
| 1,351
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-29 19:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('books', '0003_initial_subjects_languages_creatortypes'),
]
operations = [
migrations.AlterModelOptions(
name='creatortype',
options={'ordering': ['name']},
),
migrations.AlterModelOptions(
name='language',
options={'ordering': ['name']},
),
migrations.AlterModelOptions(
name='owninginstitution',
options={'ordering': ['name']},
),
migrations.AlterModelOptions(
name='personbookrelationshiptype',
options={'ordering': ['name']},
),
migrations.AlterModelOptions(
name='publisher',
options={'ordering': ['name']},
),
migrations.AlterModelOptions(
name='subject',
options={'ordering': ['name']},
),
migrations.AlterUniqueTogether(
name='booklanguage',
unique_together=set([('book', 'language')]),
),
migrations.AlterUniqueTogether(
name='booksubject',
unique_together=set([('subject', 'book')]),
),
]
|
saddingtonbaynes/rez
|
src/rezplugins/shell/sh.py
|
Python
|
gpl-3.0
| 4,324
| 0.00185
|
"""
SH shell
"""
import os
import os.path
import pipes
import subprocess
from rez.config import config
from rez.utils.platform_ import platform_
from rez.shells import Shell, UnixShell
from rez.rex import EscapedString
class SH(UnixShell):
norc_arg = '--noprofile'
histfile = "~/.bash_history"
histvar = "HISTFILE"
_executable = None
@property
def executable(cls):
if cls._executable is None:
cls._executable = Shell.find_executable('sh')
return cls._executable
@classmethod
def name(cls):
return 'sh'
@classmethod
def file_extension(cls):
return 'sh'
@classmethod
def get_syspaths(cls):
if not cls.syspaths:
cmd = "cmd=`which %s`; unset PATH; $cmd %s %s 'echo __PATHS_ $PATH'" \
% (cls.name(), cls.norc_arg, cls.command_arg)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
out_, err_ = p.communicate()
if p.returncode:
paths = []
else:
lines = out_.split('\n')
line = [x for x in lines if "__PATHS_" in x.split()][0]
paths = line.strip().split()[-1].split(os.pathsep)
for path in os.defpath.split(os.path.pathsep):
if path not in paths:
paths.append(path)
cls.syspaths = [x for x in paths if x]
return cls.syspaths
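    # For sh the probe rendered above looks roughly like the following
    # (command_arg comes from the UnixShell base class and is assumed here
    # to be '-c'):
    #   cmd=`which sh`; unset PATH; $cmd --noprofile -c 'echo __PATHS_ $PATH'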
@classmethod
def startup_capabilities(cls, rcfile=False, norc=False, stdin=False,
command=False):
cls._unsupported_option('rcfile', rcfile)
rcfile = False
if command is not None:
cls._overruled_option('stdin', 'command', stdin)
stdin = False
return (rcfile, norc, stdin, command)
@classmethod
def get_startup_sequence(cls, rcfile, norc, stdin, command):
_, norc, stdin, command = \
cls.startup_capabilities(rcfile, norc, stdin, command)
envvar = None
files = []
if not ((command is not None) or stdin):
if not norc:
for file in ("~/.profile",):
if os.path.exists(os.path.expanduser(file)):
files.append(file)
envvar = 'ENV'
path = os.getenv(envvar)
if path and os.path.isfile(os.path.expanduser(path)):
files.append(path)
return dict(
stdin=stdin,
command=command,
do_rcfile=False,
envvar=envvar,
files=files,
bind_files=[],
source_bind_files=False)
def _bind_interactive_rez(self):
if config.set_prompt and self.settings.prompt:
self._addline('if [ -z "$REZ_STORED_PROMPT" ]; then export REZ_STORED_PROMPT=$PS1; fi')
if config.prefix_prompt:
cmd = 'export PS1="%s $REZ_STORED_PROMPT"'
else:
cmd = 'export PS1="$REZ_STORED_PROMPT" %s'
self._addline(cmd % "\[\e[1m\]$REZ_ENV_PROMPT\[\e[0m\]")
def setenv(self, key, value):
value = self.escape_string(value)
self._addline('export %s=%s' % (key, value))
def unsetenv(self, key):
self._addline("unset %s" % key)
def alias(self, key, value):
value = EscapedString.disallow(value)
cmd = 'function {key}() {{ {value} "$@"; }};export -f {key};'
self._addline(cmd.format(key=key, value=value))
def source(self, value):
value = self.escape_string(value)
self._addline('. %s' % value)
def escape_string(self, value):
value = EscapedString.promote(value)
value = value.expanduser()
result = ''
for is_literal, txt in value.strings:
if is_literal:
txt = pipes.quote(txt)
if not txt.startswith("'"):
txt = "'%s'" % txt
else:
txt = txt.replace('\\', '\\\\')
txt = txt.replace('"', '\\"')
txt = '"%s"' % txt
result += txt
return result
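    # Hedged sketch of the two branches above (how a string splits into
    # literal and non-literal segments is decided by EscapedString, which is
    # not shown here): literal text is quoted for sh with pipes.quote and
    # wrapped in single quotes, while non-literal text is wrapped in escaped
    # double quotes so that variable expansion still happens inside it, e.g.
    # a literal segment "a b" becomes 'a b' and a non-literal segment $HOME
    # becomes "$HOME".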
def _saferefenv(self, key):
pass
def register_plugin():
if platform_.name != "windows":
return SH
|
YeEmrick/learning
|
stanford-tensorflow/2017/assignments/exercises/e01_sol.py
|
Python
|
apache-2.0
| 4,765
| 0.004827
|
"""
Solution to simple TensorFlow exercises
For the problems
"""
import tensorflow as tf
###############################################################################
# 1a: Create two random 0-d tensors x and y of any distribution.
# Create a TensorFlow object that returns x + y if x > y, and x - y otherwise.
# Hint: look up tf.cond()
# I do the first problem for you
###############################################################################
x = tf.random_uniform([]) # Empty array as shape creates a scalar.
y = tf.random_uniform([])
out = tf.cond(tf.greater(x, y), lambda: tf.add(x, y), lambda: tf.subtract(x, y))
###############################################################################
# 1b: Create two 0-d tensors x and y randomly selected from the range [-1, 1).
# Return x + y if x < y, x - y if x > y, 0 otherwise.
# Hint: Look up tf.case().
###############################################################################
x = tf.random_uniform([], -1, 1, dtype=tf.float32)
y = tf.random_uniform([], -1, 1, dtype=tf.float32)
out = tf.case({tf.less(x, y): lambda: tf.add(x, y),
tf.greater(x, y): lambda: tf.subtract(x, y)},
default=lambda: tf.constant(0.0), exclusive=True)
print(x)
sess = tf.InteractiveSession()
print(sess.run(x))
###############################################################################
# 1c: Create the tensor x of the value [[0, -2, -1], [0, 1, 2]]
# and y as a tensor of zeros with the same shape as x.
# Return a boolean tensor that yields Trues if x equals y element-wise.
# Hint: Look up tf.equal().
###############################################################################
x = tf.constant([[0, -2, -1], [0, 1, 2]])
y = tf.zeros_like(x)
out = tf.equal(x, y)
###############################################################################
# 1d: Create the tensor x of value
# [29.05088806, 27.61298943, 31.19073486, 29.35532951,
# 30.97266006, 26.67541885, 38.08450317, 20.74983215,
# 34.94445419, 34.45999146, 29.06485367, 36.01657104,
# 27.88236427, 20.56035233, 30.20379066, 29.51215172,
# 33.71149445, 28.59134293, 36.05556488, 28.66994858].
# Get the indices of elements in x whose values are greater than 30.
# Hint: Use tf.where().
# Then extract elements whose values are greater than 30.
# Hint: Use tf.gather().
###############################################################################
x = tf.constant([29.05088806, 27.61298943, 31.19073486, 29.35532951,
30.97266006, 26.67541885, 38.08450317, 20.74983215,
34.94445419, 34.45999146, 29.06485367, 36.01657104,
27.88236427, 20.56035233, 30.20379066, 29.51215172,
33.71149445, 28.59134293, 36.05556488, 28.66994858])
indices = tf.where(x > 30)
out = tf.gather(x, indices)
###############################################################################
# 1e: Create a diagonal 2-d tensor of size 6 x 6 with the diagonal values of 1,
# 2, ..., 6
# Hint: Use tf.range() and tf.diag().
###############################################################################
values = tf.range(1, 7)
out = tf.diag(values)
###############################################################################
# 1f: Create a random 2-d tensor of size 10 x 10 from any distribution.
# Calculate its determinant.
# Hint: Look at tf.matrix_determinant().
###############################################################################
m = tf.random_normal([10, 10], mean=10, stddev=1)
out = tf.matrix_determinant(m)
###############################################################################
# 1g: Create tensor x with value [5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9].
# Return the unique elements in x
# Hint: use tf.unique(). Keep in mind that tf.unique() returns a tuple.
###############################################################################
x = tf.constant([5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9])
unique_values, indices = tf.unique(x)
###############################################################################
# 1h: Create two tensors x and y of shape 300 from any normal distribution,
# as long as they are from the same distribution.
# Use tf.cond() to return:
# - The mean squared error of (x - y) if the average of all elements in (x - y)
# is negative, or
# - The sum of absolute value of all elements in the tensor (x - y) otherwise.
# Hint: see the Huber loss function in the lecture slides 3.
###############################################################################
x = tf.random_normal([300], mean=5, stddev=1)
y = tf.random_normal([300], mean=5, stddev=1)
average = tf.reduce_mean(x - y)
def f1(): return tf.reduce_mean(tf.square(x - y))
def f2(): return tf.reduce_sum(tf.abs(x - y))
out = tf.cond(average < 0, f1, f2)
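# A hedged way to evaluate any of the tensors above under TF 1.x graph
# execution (the session name is assumed):
#   with tf.Session() as sess:
#       print(sess.run(out))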
|
numpy/numpy
|
numpy/core/tests/test_ufunc.py
|
Python
|
bsd-3-clause
| 105,354
| 0.000816
|
import warnings
import itertools
import sys
import pytest
import numpy as np
import numpy.core._umath_tests as umt
import numpy.linalg._umath_linalg as uml
import numpy.core._operand_flag_tests as opflag_tests
import numpy.core._rational_tests as _rational_tests
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_no_warnings,
assert_allclose, HAS_REFCOUNT, suppress_warnings
)
from numpy.testing._private.utils import requires_memory
from numpy.compat import pickle
UNARY_UFUNCS = [obj for obj in np.core.umath.__dict__.values()
if isinstance(obj, np.ufunc)]
UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types]
class TestUfuncKwargs:
def test_kwarg_exact(self):
assert_raises(TypeError, np.add, 1, 2, castingx='safe')
assert_raises(TypeError, np.add, 1, 2, dtypex=int)
assert_raises(TypeError, np.add, 1, 2, extobjx=[4096])
assert_raises(TypeError, np.add, 1, 2, outx=None)
assert_raises(TypeError, np.add, 1, 2, sigx='ii->i')
assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i')
assert_raises(TypeError, np.add, 1, 2, subokx=False)
assert_raises(TypeError, np.add, 1, 2, wherex=[True])
def test_sig_signature(self):
assert_raises(TypeError, np.add, 1, 2, sig='ii->i',
signature='ii->i')
def test_sig_dtype(self):
assert_raises(TypeError, np.add, 1, 2, sig='ii->i',
dtype=int)
assert_raises(TypeError, np.add, 1, 2, signature='ii->i',
dtype=int)
def test_extobj_refcount(self):
# Should not segfault with USE_DEBUG.
assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True)
class TestUfuncGenericLoops:
"""Test generic loops.
The loops to be tested are:
PyUFunc_ff_f_As_dd_d
PyUFunc_ff_f
PyUFunc_dd_d
PyUFunc_gg_g
PyUFunc_FF_F_As_DD_D
PyUFunc_DD_D
PyUFunc_FF_F
PyUFunc_GG_G
PyUFunc_OO_O
PyUFunc_OO_O_method
PyUFunc_f_f_As_d_d
PyUFunc_d_d
PyUFunc_f_f
PyUFunc_g_g
PyUFunc_F_F_As_D_D
PyUFunc_F_F
PyUFunc_D_D
PyUFunc_G_G
PyUFunc_O_O
PyUFunc_O_O_method
PyUFunc_On_Om
Where:
f -- float
d -- double
g -- long double
F -- complex float
D -- complex double
G -- complex long double
O -- python object
It is difficult to assure that each of these loops is entered from the
Python level as the special cased loops are a moving target and the
corresponding types are architecture dependent. We probably need to
define C level testing ufuncs to get at them. For the time being, I've
just looked at the signatures registered in the build directory to find
relevant functions.
"""
np_dtypes = [
(np.single, np.single), (np.single, np.double),
(np.csingle, np.csingle), (np.csingle, np.cdouble),
(np.double, np.double), (np.longdouble, np.longdouble),
(np.cdouble, np.cdouble), (np.clongdouble, np.clongdouble)]
@pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
def test_unary_PyUFunc(self, input_dtype, output_dtype, f=np.exp, x=0, y=1):
xs = np.full(10, input_dtype(x), dtype=output_dtype)
ys = f(xs)[::2]
assert_allclose(ys, y)
assert_equal(ys.dtype, output_dtype)
def f2(x, y):
return x**y
@pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
def test_binary_PyUFunc(self, input_dtype, output_dtype, f=f2, x=0, y=1):
xs = np.full(10, input_dtype(x), dtype=output_dtype)
ys = f(xs, xs)[::2]
assert_allclose(ys, y)
assert_equal(ys.dtype, output_dtype)
# class to use in testing object method loops
class foo:
def conjugate(self):
return np.bool_(1)
def logical_xor(self, obj):
return np.bool_(1)
def test_unary_PyUFunc_O_O(self):
x = np.ones(10, dtype=object)
assert_(np.all(np.abs(x) == 1))
def test_unary_PyUFunc_O_O_method_simple(self, foo=foo):
x = np.full(10, foo(), dtype=object)
assert_(np.all(np.conjugate(x) == True))
def test_binary_PyUFunc_OO_O(self):
x = np.ones(10, dtype=object)
assert_(np.all(np.add(x, x) == 2))
def test_binary_PyUFunc_OO_O_method(self, foo=foo):
x = np.full(10, foo(), dtype=object)
assert_(np.all(np.logical_xor(x, x)))
def test_binary_PyUFunc_On_Om_method(self, foo=foo):
x = np.full((10, 2, 3), foo(), dtype=object)
assert_(np.all(np.logical_xor(x, x)))
def test_python_complex_conjugate(self):
# The conjugate ufunc should fall back to calling the method:
arr = np.array([1+2j, 3-4j], dtype="O")
assert isinstance(arr[0], complex)
res = np.conjugate(arr)
assert res.dtype == np.dtype("O")
assert_array_equal(res, np.array([1-2j, 3+4j], dtype="O"))
@pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS)
def test_unary_PyUFunc_O_O_method_full(self, ufunc):
"""Compare the result of the object loop with non-object one"""
val = np.float64(np.pi/4)
class MyFloat(np.float64):
def __getattr__(self, attr):
try:
return super().__getattr__(attr)
except AttributeError:
return lambda: getattr(np.core.umath, attr)(val)
# Use 0-D arrays, to ensure the same element call
num_arr = np.array(val, dtype=np.float64)
obj_arr = np.array(MyFloat(val), dtype="O")
with np.errstate(all="raise"):
try:
res_num = ufunc(num_arr)
except Exception as exc:
with assert_raises(type(exc)):
ufunc(obj_arr)
else:
res_obj = ufunc(obj_arr)
assert_array_almost_equal(res_num.astype("O"), res_obj)
def _pickleable_module_global():
pass
class TestUfunc:
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_(pickle.loads(pickle.dumps(np.sin,
protocol=proto)) is np.sin)
# Check that ufunc not defined in the top level numpy namespace
# such as numpy.core._rational_tests.test_add can also be pickled
res = pickle.loads(pickle.dumps(_rational_tests.test_add,
protocol=proto))
assert_(res is _rational_tests.test_add)
def test_pickle_withstring(self):
astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n"
b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
assert_(pickle.loads(astring) is np.cos)
def test_pickle_name_is_qualname(self):
# This tests that a simplification of our ufunc pickle code will
# lead to allowing qualnames as names. Future ufuncs should
# possibly add a specific qualname, or a hook into pickling instead
# (dask+numba may benefit).
_pickleable_module_global.ufunc = umt._pickleable_module_global_ufunc
obj = pickle.loads(pickle.dumps(_pickleable_module_global.ufunc))
assert obj is umt._pickleable_module_global_ufunc
def test_reduceat_shifting_sum(self):
L = 6
x = np.arange(L)
idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel()
assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7])
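        # Worked arithmetic for the assertion above: idx is
        # [0, 2, 1, 3, 2, 4, 3, 5], so reduceat yields sums over x[0:2],
        # x[2] alone (decreasing index pair), x[1:3], x[3] alone, x[2:4],
        # x[4] alone, x[3:5], and x[5:]; keeping every other entry with
        # [::2] gives the sliding pairwise sums [1, 3, 5, 7].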
def test_all_ufunc(self):
"""Try to check presence and results of all ufuncs.
The list of ufuncs comes from generate_umath.py and is as follows:
===== ==== ============= =============== ========================
done args function types notes
===== ==== ============= =============== ========================
n 1 conjugate nums + O