| repo_name (stringlengths 5–100) | path (stringlengths 4–231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6–947k) | score (float64 0–0.34) | prefix (stringlengths 0–8.16k) | middle (stringlengths 3–512) | suffix (stringlengths 0–8.17k) |
|---|---|---|---|---|---|---|---|---|
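The `prefix`, `middle`, and `suffix` columns split each source file at two cut points (a fill-in-the-middle layout), so concatenating the three cells reproduces the original file. Below is a minimal sketch of reassembling one row, assuming rows are available as plain Python dicts keyed by the column names above; the sample row itself is hypothetical.

```python
# Minimal sketch: rebuild the original file text from one dataset row.
# The example row is a made-up placeholder, not taken from the table below.
def reassemble(row: dict) -> str:
    # A row stores the file split at two cut points; joining the pieces
    # in order (prefix + middle + suffix) restores the full source text.
    return row["prefix"] + row["middle"] + row["suffix"]

example_row = {
    "repo_name": "example/repo",      # hypothetical values for illustration
    "path": "pkg/module.py",
    "language": "Python",
    "license": "mit",
    "size": 42,
    "score": 0.0,
    "prefix": "def add(a, b):\n    ",
    "middle": "return a + b",
    "suffix": "\n",
}

print(reassemble(example_row))  # prints the reassembled two-line function
```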
tijme/angularjs-sandbox-escape-scanner
|
acstis/Scanner.py
|
Python
|
mit
| 5,980
| 0.002007
|
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2017 Tijme Gommers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import requests
from acstis.Payloads import Payloads
from
|
acstis.helpers.BrowserHelper import BrowserHelper
from acstis.actions.TraverseUrlAction import TraverseUrlAction
from acstis.actions.FormDataAction import FormDataAction
from acstis.actions.QueryDataAction import QueryDataAction
from nyawc.http.Handler import Handler as HTTPHandler
class Scanner:
"""The Scanner scans specific queue items on sandbox escaping/bypassing.
Attributes:
scanned_hashes list(str): A list of scanned queue item hashes.
|
__actions list(:class:`acstis.actions.BaseAction`): The actions to perform on the queue item.
__driver (:class:`acstis.Driver`): Used to check if we should stop scanning.
__verify_payload (bool): Verify if the payload was executed.
__queue_item (:class:`nyawc.QueueItem`): The queue item to perform actions on.
__session (obj): A Python requests session.
"""
scanned_hashes = []
def __init__(self, driver, angular_version, verify_payload, queue_item):
"""Initialize a scanner for the given queue item.
Args:
driver (:class:`acstis.Driver`): Used to check if we should stop scanning.
angular_version (str): The AngularJS version of the given queue_item (e.g. `1.4.2`).
verify_payload (bool): Verify if the payload was executed.
queue_item (:class:`nyawc.QueueItem`): The queue item to scan.
"""
self.__driver = driver
self.__verify_payload = verify_payload
self.__queue_item = queue_item
self.__session = requests.Session()
self.__session.mount('http://', requests.adapters.HTTPAdapter(max_retries=2))
self.__session.mount('https://', requests.adapters.HTTPAdapter(max_retries=2))
self.__actions = [
TraverseUrlAction(Payloads.get_for_version(angular_version)),
FormDataAction(Payloads.get_for_version(angular_version)),
QueryDataAction(Payloads.get_for_version(angular_version))
]
def get_vulnerable_items(self):
"""Get a list of vulnerable queue items, if any.
Returns:
list(:class:`nyawc.QueueItem`): A list of vulnerable queue items.
"""
results = []
for action in self.__actions:
if self.__driver.stopping:
break
items = action.get_action_items(self.__queue_item)
for item in items:
if self.__driver.stopping:
break
if item.get_hash() in self.scanned_hashes:
continue
self.scanned_hashes.append(item.get_hash())
if self.__is_item_vulnerable(item):
results.append(item)
return results
def __is_item_vulnerable(self, queue_item):
"""Check if the given queue item is vulnerable by executing it using the HttpHandler and checking if the payload is in scope.
Args:
queue_item (:class:`nyawc.QueueItem`): The queue item to check.
Returns:
bool: True if vulnerable, false otherwise.
"""
try:
HTTPHandler(None, queue_item)
except Exception:
return False
if not queue_item.response.headers.get("content-type") or not "html" in queue_item.response.headers.get("content-type"):
return False
if not queue_item.get_soup_response():
return False
if not self.__should_payload_execute(queue_item):
return False
if self.__verify_payload:
if not self.__verify_queue_item(queue_item.verify_item):
return False
return True
def __should_payload_execute(self, queue_item):
"""Run static checks to see if the payload should be executed.
Args:
queue_item (:class:`nyawc.QueueItem`): The queue item to check.
Returns:
bool: True if payload should execute, false otherwise.
"""
soup = queue_item.get_soup_response()
ng_app_soup = soup.select("[ng-app]")
if not ng_app_soup:
return False
for non_bindable in ng_app_soup[0].select("[ng-non-bindable]"):
non_bindable.decompose()
in_scope_html = str(ng_app_soup[0])
if queue_item.payload["value"] in in_scope_html:
return True
return False
def __verify_queue_item(self, queue_item):
"""Verify if the browser opened a new window.
Args:
queue_item (:class:`nyawc.QueueItem`): The queue item to check.
Returns:
bool: True if the payload worked, false otherwise.
"""
browser = BrowserHelper.request(queue_item)
return browser and len(browser.window_handles) >= 2
|
mircealungu/Zeeguu-Core
|
tools/tag_topics_in_danish.py
|
Python
|
mit
| 1,205
| 0.00249
|
import zeeguu_core
from zeeguu_core.model import Article, Language, LocalizedTopic
session = zeeguu_core.db.session
counter = 0
languages = Language.available_lang
|
uages()
languages = [Language.find('da')]
for language in languages:
articles = Article.query.filter(Article.language == language).order_by(Article.id.desc()).all()
loc_topics = LocalizedTopic.all_for_language(language)
total_articles = len(articles)
for article in articles:
counter += 1
print(f"{article.title}")
print(f"{article.url.as_string()}")
for
|
loc_topic in loc_topics:
if loc_topic.matches_article(article):
article.add_topic(loc_topic.topic)
print(f" #{loc_topic.topic_translated}")
print("")
session.add(article)
if counter % 1000 == 0:
percentage = (100 * counter / total_articles) / 100
print(f"{counter} dorticles done ({percentage}%). last article id: {article.id}. Comitting... ")
session.commit()
percentage = (100 * counter / total_articles) / 100
print(f"{counter} dorticles done ({percentage}%). last article id: {article.id}. Comitting... ")
session.commit()
|
jureslak/racunalniske-delavnice
|
fmf/python_v_divjini/projekt/test/test_game.py
|
Python
|
gpl-2.0
| 1,623
| 0.000616
|
from tictactoe import game, player
import unittest
from unittest import mock
class GameTest(unittest.TestCase):
def setUp(self):
self.num_of_players = 2
self.width = 3
self.height = 3
self.game = game.Game(2, 3, 3)
def test_init(self):
self.assertEqual(self.game.board, None)
self.assertEqual(self.game.width, self.width)
self.assertEqual(self.game.height, self.height)
self.assertEqual(self.game.num_of_players, self.num_of_players)
self.assertEqual(self.game.players, [])
self.assertEqual(self.game.round_counter, 0)
self.assertEqual(self.game.on_turn, 0)
def test_setup(self):
input_seq = ['Luke', 'x', 'Leia', 'o']
with mock.patch('builtins.input', side_effect=input_seq):
self.game.setup()
expected = [('Luke', 'x'), ('Leia', 'o')]
for e, p in zip(expected, self.game.players):
self.assertEqual(p.name, e[0])
self.assertEqual(p.symbol, e[1])
def test_play_round(self):
# setup
input_seq
|
= ['Luke', 'x', 'Leia', 'o']
with mock.patch('builtins.input', side_effect=inpu
|
t_seq):
self.game.setup()
input_seq = ['2', '5', '3', '1', '9', '6', '7', '4']
with mock.patch('builtins.input', side_effect=input_seq):
self.game.play_round()
finished, winner = self.game.board.finished()
self.assertTrue(finished)
self.assertEqual(winner, 1)
expected_board = [[1, 0, 0], [1, 1, 1], [0, None, 0]]
self.assertEqual(self.game.board.grid, expected_board)
|
nuccdc/scoring_engine
|
scoring_engine/engine/serializers.py
|
Python
|
mit
| 1,320
| 0.000758
|
'''
'''
from rest_framework import serializers
import models
class PluginSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Plugin
fields = ('id', 'name', )
class ScoredServiceSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.ScoredService
fields = ('id', 'name', 'plugin', 'checks', 'services')
class CheckSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Check
fields = ('id', 'key', 'value', 'scored_service')
class TeamSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Team
fields = ('id', 'name', 'services')
class ServiceSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model
|
= models.Service
fields = ('id', 'scored_service', 'address', 'port', 'team', '
|
credentials', 'results')
read_only_fields = ('results', )
class CredentialSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Credential
fields = ('id', 'username', 'password', 'service')
class ResultSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.Result
fields = ('id', 'status', 'service', 'explanation')
|
jabber-at/hp
|
hp/account/migrations/0006_notifications.py
|
Python
|
gpl-3.0
| 1,200
| 0.0025
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-14 12:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('account', '0005_user_last_activity'),
]
operations = [
migrations.CreateModel(
name='Notifications',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=Tr
|
ue)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='notifications', serialize=False, to
|
=settings.AUTH_USER_MODEL)),
('account_expires', models.BooleanField(default=False, help_text='Accounts are deleted if they are not used for a year. Warn me a week before mine would be deleted.')),
('gpg_expires', models.BooleanField(default=False, help_text='Warn me a week before any of my GPG keys is about to expire.')),
],
options={
'abstract': False,
},
),
]
|
sirkubax/ansible
|
lib/ansible/module_utils/cloudstack.py
|
Python
|
gpl-3.0
| 13,783
| 0.004136
|
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
class AnsibleCloudStack:
def __init__(self, module):
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
self.result = {
'changed': False,
}
self.module = module
self._connect()
self.domain = None
self.account = None
self.project = None
self.ip_address = None
self.zone = None
self.vm = None
self.os_type = None
self.hypervisor = None
self.capabilities = None
def _connect(self):
api_key = self.module.params.get('api_key')
api_secret = self.module.params.get('secret_key')
api_url = self.module.params.get('api_url')
api_http_method = self.module.params.get('api_http_method')
api_timeout = self.module.params.get('api_timeout')
if api_key and api_secret and api_url:
self.cs = CloudStack(
endpoint=api_url,
key=api_key,
secret=api_secret,
timeout=api_timeout,
method=api_http_method
)
else:
self.cs = CloudStack(**read_config())
def get_or_fallback(self, key=None, fallback_key=None):
value = self.module.params.get(key)
|
if not value:
value = self.module.params.get(fallback_key)
return value
# TODO: for backward compatibility only, remove i
|
f not used anymore
def _has_changed(self, want_dict, current_dict, only_keys=None):
return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys)
def has_changed(self, want_dict, current_dict, only_keys=None):
for key, value in want_dict.iteritems():
# Optionally limit by a list of keys
if only_keys and key not in only_keys:
continue;
# Skip None values
if value is None:
continue;
if key in current_dict:
# API returns string for int in some cases, just to make sure
if isinstance(value, int):
current_dict[key] = int(current_dict[key])
elif isinstance(value, str):
current_dict[key] = str(current_dict[key])
# Only need to detect a single change, not every item
if value != current_dict[key]:
return True
return False
def _get_by_key(self, key=None, my_dict={}):
if key:
if key in my_dict:
return my_dict[key]
self.module.fail_json(msg="Something went wrong: %s not found" % key)
return my_dict
def get_project(self, key=None):
if self.project:
return self._get_by_key(key, self.project)
project = self.module.params.get('project')
if not project:
return None
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
projects = self.cs.listProjects(**args)
if projects:
for p in projects['project']:
if project.lower() in [ p['name'].lower(), p['id'] ]:
self.project = p
return self._get_by_key(key, self.project)
self.module.fail_json(msg="project '%s' not found" % project)
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
ip_address = self.module.params.get('ip_address')
if not ip_address:
self.module.fail_json(msg="IP address param 'ip_address' is required")
args = {}
args['ipaddress'] = ip_address
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
ip_addresses = self.cs.listPublicIpAddresses(**args)
if not ip_addresses:
self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def get_vm(self, key=None):
if self.vm:
return self._get_by_key(key, self.vm)
vm = self.module.params.get('vm')
if not vm:
self.module.fail_json(msg="Virtual machine param 'vm' is required")
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['zoneid'] = self.get_zone(key='id')
vms = self.cs.listVirtualMachines(**args)
if vms:
for v in vms['virtualmachine']:
if vm in [ v['name'], v['displayname'], v['id'] ]:
self.vm = v
return self._get_by_key(key, self.vm)
self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
def get_zone(self, key=None):
if self.zone:
return self._get_by_key(key, self.zone)
zone = self.module.params.get('zone')
zones = self.cs.listZones()
# use the first zone if no zone param given
if not zone:
self.zone = zones['zone'][0]
return self._get_by_key(key, self.zone)
if zones:
for z in zones['zone']:
if zone in [ z['name'], z['id'] ]:
self.zone = z
return self._get_by_key(key, self.zone)
self.module.fail_json(msg="zone '%s' not found" % zone)
def get_os_type(self, key=None):
if self.os_type:
return self._get_by_key(key, self.os_type)
os_type = self.module.params.get('os_type')
if not os_type:
return None
os_types = self.cs.listOsTypes()
if os_types:
for o in os_types['ostype']:
if os_type in [ o['description'], o['id'] ]:
self.os_type = o
return self._get_by_key(key, self.os_type)
self.module.fail_json(msg="OS type '%s' not found" % os_type)
def get_hypervisor(self):
if self.hypervisor:
return self.hypervisor
hypervisor = self.module.params.get
|
witten/borgmatic
|
borgmatic/borg/list.py
|
Python
|
gpl-3.0
| 3,343
| 0.003889
|
import logging
from borgmatic.borg.flags import make_flags, make_flags_from_arguments
from borgmatic.execute import execute_command
logger = logging.getLogger(__name__)
# A hack to convince Borg to exclude archives ending in ".checkpoint". This assumes that a
# non-checkpoint archive name ends in a digit (e.g. from a timestamp).
BORG_EXCLUDE_CHECKPOINTS_GLOB = '*[0123456789]'
def resolve_archive_name(repository, archive, storage_config, local_path='borg', remote_path=None):
'''
Given a local or remote repository path, an archive name, a storage config dict, a local Borg
path, and a remote Borg path, simply return the archive name. But if the archive name is
"latest", then instead introspect the repository for the latest successful (non-checkpoint)
archive, and return its name.
Raise ValueError if "latest" is given but there are no archives in the repository.
'''
if archive != "latest":
ret
|
urn archive
lock_wait = storage_config.get('lock_wait', None)
full_command = (
(local_path, 'list')
+ (('--info',) if logger.getEffectiveLevel() == logging.INFO else ())
+ (('--debug', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
+ make_flags('remote-path', remote_path)
+ make_flags('lock-wait', lock_wait)
+ make_flags('glob-archives', BORG_EXCLUDE_CHECKPOINTS_GLOB)
+ ma
|
ke_flags('last', 1)
+ ('--short', repository)
)
output = execute_command(full_command, output_log_level=None, borg_local_path=local_path)
try:
latest_archive = output.strip().splitlines()[-1]
except IndexError:
raise ValueError('No archives found in the repository')
logger.debug('{}: Latest archive is {}'.format(repository, latest_archive))
return latest_archive
def list_archives(repository, storage_config, list_arguments, local_path='borg', remote_path=None):
'''
Given a local or remote repository path, a storage config dict, and the arguments to the list
action, display the output of listing Borg archives in the repository or return JSON output. Or,
if an archive name is given, listing the files in that archive.
'''
lock_wait = storage_config.get('lock_wait', None)
if list_arguments.successful:
list_arguments.glob_archives = BORG_EXCLUDE_CHECKPOINTS_GLOB
full_command = (
(local_path, 'list')
+ (
('--info',)
if logger.getEffectiveLevel() == logging.INFO and not list_arguments.json
else ()
)
+ (
('--debug', '--show-rc')
if logger.isEnabledFor(logging.DEBUG) and not list_arguments.json
else ()
)
+ make_flags('remote-path', remote_path)
+ make_flags('lock-wait', lock_wait)
+ make_flags_from_arguments(
list_arguments, excludes=('repository', 'archive', 'paths', 'successful')
)
+ (
'::'.join((repository, list_arguments.archive))
if list_arguments.archive
else repository,
)
+ (tuple(list_arguments.paths) if list_arguments.paths else ())
)
return execute_command(
full_command,
output_log_level=None if list_arguments.json else logging.WARNING,
borg_local_path=local_path,
)
|
uber-common/opentracing-python-instrumentation
|
tests/opentracing_instrumentation/test_boto3.py
|
Python
|
mit
| 6,158
| 0
|
import datetime
import io
import boto3
import mock
import pytest
import requests
import testfixtures
from botocore.exceptions import ClientError
from opentracing.ext import tags
from opentracing_instrumentation.client_hooks import boto3 as boto3_hooks
DYNAMODB_ENDPOINT_URL = 'http://localhost:4569'
S3_ENDPOINT_URL = 'http://localhost:4572'
DYNAMODB_CONFIG = {
'endpoint_url': DYNAMODB_ENDPOINT_URL,
'aws_access_key_id': '-',
'aws_secret_access_key': '-',
'region_name': 'us-east-1',
}
S3_CONFIG = dict(DYNAMODB_CONFIG, endpoint_url=S3_ENDPOINT_URL)
def create_use
|
rs_table(dynamodb):
dynamodb.create_table(
TableName='users',
KeySchema=[{
'AttributeName': 'username',
'Key
|
Type': 'HASH'
}],
AttributeDefinitions=[{
'AttributeName': 'username',
'AttributeType': 'S'
}],
ProvisionedThroughput={
'ReadCapacityUnits': 9,
'WriteCapacityUnits': 9
}
)
@pytest.fixture
def dynamodb_mock():
import moto
with moto.mock_dynamodb2():
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
create_users_table(dynamodb)
yield dynamodb
@pytest.fixture
def dynamodb():
dynamodb = boto3.resource('dynamodb', **DYNAMODB_CONFIG)
try:
dynamodb.Table('users').delete()
except ClientError as error:
# you can not just use ResourceNotFoundException class
# to catch an error since it doesn't exist until it's raised
if error.__class__.__name__ != 'ResourceNotFoundException':
raise
create_users_table(dynamodb)
# waiting until the table exists
dynamodb.meta.client.get_waiter('table_exists').wait(TableName='users')
return dynamodb
@pytest.fixture
def s3_mock():
import moto
with moto.mock_s3():
s3 = boto3.client('s3', region_name='us-east-1')
yield s3
@pytest.fixture
def s3():
return boto3.client('s3', **S3_CONFIG)
@pytest.fixture(autouse=True)
def patch_boto3():
boto3_hooks.install_patches()
try:
yield
finally:
boto3_hooks.reset_patches()
def assert_last_span(kind, service_name, operation, tracer, response=None):
span = tracer.recorder.get_spans()[-1]
request_id = response and response['ResponseMetadata'].get('RequestId')
assert span.operation_name == 'boto3:{}:{}:{}'.format(
kind, service_name, operation
)
assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
assert span.tags.get(tags.COMPONENT) == 'boto3'
assert span.tags.get('boto3.service_name') == service_name
if request_id:
assert span.tags.get('aws.request_id') == request_id
def _test_dynamodb(dynamodb, tracer):
users = dynamodb.Table('users')
response = users.put_item(Item={
'username': 'janedoe',
'first_name': 'Jane',
'last_name': 'Doe',
})
assert_last_span('resource', 'dynamodb', 'put_item', tracer, response)
response = users.get_item(Key={'username': 'janedoe'})
user = response['Item']
assert user['first_name'] == 'Jane'
assert user['last_name'] == 'Doe'
assert_last_span('resource', 'dynamodb', 'get_item', tracer, response)
try:
dynamodb.Table('test').delete_item(Key={'username': 'janedoe'})
except ClientError as error:
response = error.response
assert_last_span('resource', 'dynamodb', 'delete_item', tracer, response)
response = users.creation_date_time
assert isinstance(response, datetime.datetime)
assert_last_span('resource', 'dynamodb', 'describe_table', tracer)
def _test_s3(s3, tracer):
fileobj = io.BytesIO(b'test data')
bucket = 'test-bucket'
response = s3.create_bucket(Bucket=bucket)
assert_last_span('client', 's3', 'create_bucket', tracer, response)
response = s3.upload_fileobj(fileobj, bucket, 'test.txt')
assert_last_span('client', 's3', 'upload_fileobj', tracer, response)
def is_service_running(endpoint_url, expected_status_code):
try:
# feel free to suggest better solution for this check
response = requests.get(endpoint_url, timeout=1)
return response.status_code == expected_status_code
except requests.exceptions.ConnectionError:
return False
def is_dynamodb_running():
return is_service_running(DYNAMODB_ENDPOINT_URL, 502)
def is_s3_running():
return is_service_running(S3_ENDPOINT_URL, 200)
def is_moto_presented():
try:
import moto
return True
except ImportError:
return False
@pytest.mark.skipif(not is_dynamodb_running(),
reason='DynamoDB is not running or cannot connect')
def test_boto3_dynamodb(thread_safe_tracer, dynamodb):
_test_dynamodb(dynamodb, thread_safe_tracer)
@pytest.mark.skipif(not is_moto_presented(),
reason='moto module is not presented')
def test_boto3_dynamodb_with_moto(thread_safe_tracer, dynamodb_mock):
_test_dynamodb(dynamodb_mock, thread_safe_tracer)
@pytest.mark.skipif(not is_s3_running(),
reason='S3 is not running or cannot connect')
def test_boto3_s3(s3, thread_safe_tracer):
_test_s3(s3, thread_safe_tracer)
@pytest.mark.skipif(not is_moto_presented(),
reason='moto module is not presented')
def test_boto3_s3_with_moto(s3_mock, thread_safe_tracer):
_test_s3(s3_mock, thread_safe_tracer)
@testfixtures.log_capture()
def test_boto3_s3_missing_func_instrumentation(capture):
class Patcher(boto3_hooks.Boto3Patcher):
S3_FUNCTIONS_TO_INSTRUMENT = 'missing_func',
Patcher().install_patches()
capture.check(('root', 'WARNING', 'S3 function missing_func not found'))
@mock.patch.object(boto3_hooks, 'patcher')
def test_set_custom_patcher(default_patcher):
patcher = mock.Mock()
boto3_hooks.set_patcher(patcher)
assert boto3_hooks.patcher is not default_patcher
assert boto3_hooks.patcher is patcher
boto3_hooks.install_patches()
boto3_hooks.reset_patches()
patcher.install_patches.assert_called_once()
patcher.reset_patches.assert_called_once()
|
jpartogi/DOSBox.py
|
tests/filesystem/directory.py
|
Python
|
gpl-3.0
| 733
| 0.004093
|
import unittest
from dosbox.filesystem.dire
|
ctory import *
class DirectoryTestCase(unittest.TestCase):
def setUp(self):
self.root_dir = Directory("root")
self.sub_dir1 = Directory("subdir1")
def test_path(self):
self.root_dir.add(self.sub_dir1)
self.assertEqual(self.sub_dir1.parent, self.root_dir)
self.assertEqual(self.sub_dir1.path, "root\subdir1")
def test_add_remove(self):
subdir = Directory("subdir")
self.root_dir.a
|
dd(subdir)
self.assertEqual(subdir.parent, self.root_dir)
self.root_dir.remove(subdir)
self.assertEqual(subdir.parent, None)
def test_number_of_contained_file_system_item(self):
return NotImplemented
|
saymedia/SaySpider
|
saymedia/saymedia/settings.py
|
Python
|
mit
| 1,287
| 0.001554
|
# -*- coding: utf-8 -*-
# Scrapy settings for saymedia project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'saymedia'
SPIDER_MODULES = ['saymedia.spiders']
NEWSPIDER_MODULE = 'saymedia.spiders'
ROBOTSTXT_OBEY = True
DOWNLOADER_MIDDLEWARES = {
'saymedia.middleware.ErrorConverterMiddleware': 1,
# 'saymedia.middleware.MysqlDownloaderMiddleware': 1,
'saymedia.middleware.OriginHostMiddleware': 2,
'saymedia.middleware.TimerDownloaderMiddleware': 998,
}
SPIDER_REPORTS = {
'xml': 'saymedia.reports.XmlReport',
'firebase': 'saymedia.reports.FirebaseReport',
}
SPIDER_MIDDLEWARES = {
'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': None,
}
ITEM_PIPELINES = {
'saymedi
|
a.pipelines.DatabaseWriterPipeline': 0,
}
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'SEO Spider (+http://www.say
|
media.com)'
DATABASE = {
'USER': 'YOUR_DATABASE_USER',
'PASS': 'YOUR_DATABASE_PASS',
}
FIREBASE_URL = "YOUR_FIREBASE_URL"
try:
# Only used in development environments
from .local_settings import *
except ImportError:
pass
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/histogram2d/_histfunc.py
|
Python
|
mit
| 487
| 0.002053
|
import _plotly_utils.basevalidators
class HistfuncValidator(_plotly_
|
utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="histfunc", parent_name="histogram2d", **kwargs):
super(HistfuncValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
values=kwargs
|
.pop("values", ["count", "sum", "avg", "min", "max"]),
**kwargs
)
|
sistason/pa3
|
src/pa3_frontend/pa3_django/pa3_web/subscription_sms_handling.py
|
Python
|
gpl-3.0
| 411
| 0.007299
|
from django.http im
|
port HttpResponse, JsonResponse
from pa3_web.models import Subscriber
#
# Example of a subscription client
#
def delete_subscriber(phone_number):
[sub.delete() for sub in Subscriber.objects.filter(protocol='sms',
identifier=phone_number)]
return Ht
|
tpResponse(200)
def notify(subscriber):
# Send Notifying SMS
return
|
nrebhun/FileSponge
|
src/filesponge.py
|
Python
|
mit
| 2,647
| 0.0068
|
#!/usr/bin/env python
import subprocess, os, sys, argparse
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="First target directory for evaluation")
parser.add_argument("directories", nargs='+', help="All other directories to be evaluated")
parser.add_argument("-o", "--output", help="Output destination. If none specified, defaults to STDOUT")
args = parser.parse_args()
def fileSponge(dirs, outputDir):
commonList = findIdentical(dirs).rstrip()
outputCommon(commonList, outputDir)
def findIdentical(dirs):
prev = None
for index in dirs:
if prev is None:
prev = index
else:
diff = "diff --brief -Nrs %s %s" % (prev, index)
egrepPattern = "^Files .+ and .+ are identical$"
awkPattern = "(Files | and | are identical)"
diffProcess = subprocess.Popen(diff.split(), stdout=subprocess.PIPE)
egrepProcess = subprocess.Popen(["egrep", egrepPattern], stdout=subprocess.PIPE, stdin=diffProcess.stdout)
awkProcess = subprocess.Popen(["awk", "-F", awkPattern, "{print($2, \"==\", $3)}"], stdout=subprocess.PIPE, stdin=egrepProcess.stdout)
(out, err) = awkProcess.communicate()
return out
def outputCommon(commonList, outputDir):
if outputDir is not None:
options = "-av"
exclusions = "--exclude='*'"
srcPath = "./"
destPath = "%s/" % (outputDir)
targetFiles = isolateTargetFiles(commonList)
inclusions = "--files-from=./commonFiles.txt"#generateRsyncInclusionString(targetFiles)
writeInclusionListToDisk(targetFiles)
rsync = "rsync %s %s %s %s" % (options, inclusions, srcPath, destPath)
print rsync
rsyncProcess = subprocess.call(rsync.split())
else:
|
print("Identical files:\n%s" % (commonList))
def isolateTargetFiles(commonList):
targetFiles = []
for line in commonList.split('\n'):
targetFiles.append(line.split()[0])
return targetFiles
def generateRsyncInclusionString(targetFiles):
inclusions = ''
for item in targetFiles:
inclusions += " --include='./%s'" % (item)
ret
|
urn inclusions
def writeInclusionListToDisk(targetFiles):
outfile = open('commonFiles.txt', 'w')
for item in targetFiles:
outfile.write("%s\n" % item)
def usage():
dirList = []
outputDir = None
if args.output:
outputDir = args.output or None
if args.directory:
dirList = args.directory.split()
if args.directories:
dirList += args.directories
fileSponge(dirList, outputDir)
usage()
|
nachandr/cfme_tests
|
cfme/fixtures/screenshots.py
|
Python
|
gpl-2.0
| 1,470
| 0.001361
|
"""Taking screenshots inside tests!
If you want to take a screenshot inside your test, just do it like this:
.. code-block:: python
def test_my_test(take_screenshot):
# do something
take_screenshot("Particular name for the screenshot")
# do something else
"""
import fauxfactory
import pytest
from cfme.fixtures.artifactor_plugin import fire_art_tes
|
t_hook
from cfme.fixtures.pytest_store import store
from cfme.utils.browser import take_screenshot as take_browser_screenshot
from cfme.utils.log import logger
@pytest.fixture(scope="function")
def take_screenshot(request):
item = request.node
def _take_screenshot(name):
logger
|
.info(f"Taking a screenshot named {name}")
ss, ss_error = take_browser_screenshot()
g_id = fauxfactory.gen_alpha(length=6)
if ss:
fire_art_test_hook(
item, 'filedump',
description=f"Screenshot {name}", file_type="screenshot", mode="wb",
contents_base64=True, contents=ss, display_glyph="camera",
group_id=f"fix-screenshot-{g_id}", slaveid=store.slaveid)
if ss_error:
fire_art_test_hook(
item, 'filedump',
description=f"Screenshot error {name}", mode="w", contents_base64=False,
contents=ss_error, display_type="danger",
group_id=f"fix-screenshot-{g_id}", slaveid=store.slaveid)
return _take_screenshot
|
choozm/mamakstallinvestor-stockquote
|
populate_myportfolio.py
|
Python
|
mit
| 6,849
| 0.019711
|
import os
import datetime
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'invest.settings')
import django
django.setup()
from myportfolio.models import Investor, Portfolio, AssetClass, STOCKS, BONDS,\
ALTERNATIVES, Security, Transaction, Account
def populate():
investor1 = add_investor(name='David Lim',
username='Dave',
email='dave@gmail.com')
p1 = add_portfolio(owner=investor1,
name='Retirement',
obj='Save for retirement',
risk_tolerance='I am comfortable with 80/20 stock bond ratio',
time_frame=30,
stock_bond_ratio=4,
asset_allocation={})
a1 = add_assetclass(owner=investor1,
name='US',
asset_type=STOCKS)
a2 = add_assetclass(owner=investor1,
name='EU',
asset_type=STOCKS)
a3 = add_assetclass(owner=investor1,
name='Global Bonds',
|
asset_type=BONDS)
p1.target_asset_allocation[a1.id] = 0.3
p1.target_asset_allocation[a2.id] = 0.3
p1.target_asset_allocation[a3.id] = 0.4
p1.save()
s1 = add_security(asset_class=a1,
name='Vanguard Total Stock ETF',
symbol='VTI',
isin='QW1234456',
currency='USD',
|
exchange='NYSE',
expense_ratio_percent=0.1,
last_trade_price=100.05)
ac1 = add_account(owner=investor1,
name='SCB SGD',
description='SGD trading account')
ac2 = add_account(owner=investor1,
name='SCB USD',
description='USD trading account')
ac2 = add_account(owner=investor1,
name='SCB GBP',
description='GBP trading account')
t1 = add_transaction(portfolio=p1,
security=s1,
account=ac2,
date=datetime.date(2016, 5, 3),
price=100.0,
quantity=10)
t2 = add_transaction(portfolio=p1,
security=s1,
account=ac2,
date=datetime.date(2016, 5, 18),
price=108.0,
quantity=5)
investor2 = add_investor(name='Diana',
username='Rose',
email='rose@gmail.com')
p2 = add_portfolio(owner=investor2,
name='New house',
obj='Save for new house',
risk_tolerance='I am comfortable with 50/50 stock bond ratio',
time_frame=15,
stock_bond_ratio=1,
asset_allocation={})
a4 = add_assetclass(owner=investor2,
name='World',
asset_type=STOCKS)
a5 = add_assetclass(owner=investor2,
name='REIT',
asset_type=ALTERNATIVES)
a6 = add_assetclass(owner=investor2,
name='Global Bonds',
asset_type=BONDS)
p2.target_asset_allocation[a4.id] = 0.5
p2.target_asset_allocation[a5.id] = 0.1
p2.target_asset_allocation[a6.id] = 0.4
p2.save()
for i in Investor.objects.all():
print ('{} - {} - {}'.format(i.name, i.username, i.email))
for ac in Account.objects.filter(owner=i):
print ('{} - {}'.format(ac.name, ac.description))
for p in Portfolio.objects.filter(owner=i):
print (' {} - {} - {} - {}'.format(p.name, p.objective, p.time_frame, p.target_asset_allocation))
for a in AssetClass.objects.filter(owner=i):
print (' {}. {} - {}'.format(a.id, a.name, a.type))
for s in Security.objects.filter(asset_class=a):
print (' {} {}'.format(s.name, s.symbol))
for t in Transaction.objects.filter(security=s):
print (' {} {} {} {} {}'.format(t.security, t.account, t.date, t.price, t.quantity))
def add_investor(name, username, email):
i=Investor.objects.get_or_create(name=name,
username=username,
email=email)[0]
return i
def add_portfolio(owner, name, obj, risk_tolerance, time_frame, stock_bond_ratio, asset_allocation):
p = Portfolio.objects.get_or_create(owner=owner,
time_frame=time_frame,
target_stock_bond_ratio=stock_bond_ratio,
)[0]
p.owner = owner
p.name = name
p.objective = obj
p.risk_tolerance = risk_tolerance
p.target_asset_allocation = asset_allocation
return p
def add_assetclass(owner, name, asset_type):
a = AssetClass.objects.get_or_create(owner=owner,
name=name,
type=asset_type,
)[0]
return a
def add_security(asset_class,
name,
symbol,
isin,
currency,
exchange,
expense_ratio_percent,
last_trade_price):
s = Security.objects.get_or_create(asset_class=asset_class,
name=name,
symbol=symbol,
isin=isin,
currency=currency,
exchange=exchange,
expense_ratio_percent=expense_ratio_percent,
last_trade_price=last_trade_price,
)[0]
return s
def add_account(owner, name, description):
ac = Account.objects.get_or_create(owner=owner,
name=name,
description=description
)[0]
return ac
def add_transaction(portfolio, security, account,
date, price, quantity):
t = Transaction.objects.get_or_create(portfolio=portfolio,
security=security,
account=account,
date=date,
price=price,
quantity=quantity)[0]
return t
if __name__ == '__main__':
populate()
|
MrSami/sandbox
|
alpagu/restate/apps.py
|
Python
|
mit
| 89
| 0
|
from dja
|
ngo.apps import AppConfig
|
class RestateConfig(AppConfig):
name = 'restate'
|
galaxy-iuc/parsec
|
parsec/commands/groups/delete_group_user.py
|
Python
|
apache-2.0
| 466
| 0
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('delete_group_user')
@click.argument("group_id", type=str)
@click.argument("user_id", type=str)
@pass_context
@custom_exception
@json_output
def cli(ctx, group_id, user_id):
"""Remove a user from the given group.
Outp
|
ut:
The user which was removed
"""
return ctx.gi.grou
|
ps.delete_group_user(group_id, user_id)
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_dressed_doak_sif.py
|
Python
|
mit
| 441
| 0.047619
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_doak
|
_sif.iff"
result.attribute_template_id = 9
result.stfName("npc_name","human_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
|
amwelch/a10sdk-python
|
a10sdk/core/logging/logging_disable_partition_name.py
|
Python
|
apache-2.0
| 1,271
| 0.009441
|
from a10sdk.common.A1
|
0BaseClass import A10BaseClass
class DisablePartitionName(A10BaseClass):
"""Class Description::
.
Class disable-partition-name supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param disable_partition_name: {"description": "Disable partition name in logs", "pa
|
rtition-visibility": "shared", "default": 0, "type": "number", "format": "flag", "optional": true}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/logging/disable-partition-name`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "disable-partition-name"
self.a10_url="/axapi/v3/logging/disable-partition-name"
self.DeviceProxy = ""
self.disable_partition_name = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
|
googleapis/python-aiplatform
|
.sample_configs/param_handlers/delete_specialist_pool_sample.py
|
Python
|
apache-2.0
| 714
| 0.001401
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "Li
|
cense");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
d
|
ef make_name(name: str) -> str:
# Sample function parameter name in delete_specialist_pool_sample
name = name
return name
|
lovelysystems/pyjamas
|
library/pyjamas/ui/CheckBox.py
|
Python
|
apache-2.0
| 3,250
| 0.003077
|
# Copyright 2006 James Tauber and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas.ui.ButtonBase import ButtonBase
from pyjamas.ui import Event
_CheckBox_unique_id=0;
class CheckBox(ButtonBase):
def __init__(self, label=None, asHTML=False, **kwargs):
if not kwargs.has_key('StyleName'): kwargs['StyleName']="gwt-CheckBox"
if label:
if asHTML:
kwargs['HTML'] = label
else:
kwargs['Text'] = label
self.initElement(DOM.createInputCheck(), **kwargs)
def initElement(self, element, **kwargs):
self.inputElem = element
self.labelElem = DOM.createLabel()
ButtonBase.__init__(self, DOM.createSpan(), **kwargs)
self.unsinkEvents(Event.FOCUSEVENTS| Event.ONCLICK)
DOM.sinkEvents(self.inputElem, Event.FOCUSEVENTS | Event.ONCLICK | DOM.getEventsSunk(self.inputElem))
DOM.appendChild(self.getElement(), self.inputElem)
DOM.appendChild(self.getElement(), self.labelElem)
uid = "check%d" % self.getUniqueID()
DOM.setAttribute(self.inputElem, "id", uid)
DOM.setAttribute(self.labelElem, "htmlFor", uid)
# emulate s
|
tatic
def getUniqueID(self):
global _CheckBox_unique_id
_CheckBox_unique_id += 1
return _CheckBox_unique_id;
def getHTML(self):
return DOM.getInnerHTML(self.labelElem)
def getName(self):
return DOM.getAttribute(self.inputElem, "name")
def getText(self):
return DOM.getInnerText(self.labelElem)
def setChecked(self, checke
|
d):
DOM.setBooleanAttribute(self.inputElem, "checked", checked)
DOM.setBooleanAttribute(self.inputElem, "defaultChecked", checked)
def isChecked(self):
if self.isAttached():
propName = "checked"
else:
propName = "defaultChecked"
return DOM.getBooleanAttribute(self.inputElem, propName)
def isEnabled(self):
return not DOM.getBooleanAttribute(self.inputElem, "disabled")
def setEnabled(self, enabled):
DOM.setBooleanAttribute(self.inputElem, "disabled", not enabled)
def setFocus(self, focused):
if focused:
Focus.focus(self.inputElem)
else:
Focus.blur(self.inputElem)
def setHTML(self, html):
DOM.setInnerHTML(self.labelElem, html)
def setName(self, name):
DOM.setAttribute(self.inputElem, "name", name)
def setTabIndex(self, index):
Focus.setTabIndex(self.inputElem, index)
def setText(self, text):
DOM.setInnerText(self.labelElem, text)
def onDetach(self):
self.setChecked(self.isChecked())
ButtonBase.onDetach(self)
|
mozilla/remoteobjects
|
remoteobjects/dataobject.py
|
Python
|
bsd-3-clause
| 9,792
| 0.000511
|
# Copyright (c) 2009-2010 Six Apart Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Six Apart Ltd. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
`DataObject` is a class of object that provides coding between object
attributes and dictionaries, suitable for
In `DataObject` is the mechanism for converting between dictionaries and
objects. These conversions are performed with aid of `Field` instances
declared on `DataObject` subclasses. `Field` classes reside in the
`remoteobjects.field` module.
"""
from copy import deepcopy
import logging
import remoteobjects.fields
classes_by_name = {}
classes_by_constant_field = {}
def find_by_name(name):
"""Finds and returns the DataObject subclass with the given name.
Parameter `name` should be a bare class name with no module. If there is
no class by that name, raises `KeyError`.
"""
return classes_by_name[name]
class DataObjectMetaclass(type):
"""Metaclass for `DataObject` classes.
This metaclass installs all `remoteobjects.fields.Property` instances
declared as attributes of the new class, including all `Field` and `Link`
instances.
This metaclass also makes the new class findable through the
`dataobject.find_by_name()` function.
"""
def __new__(cls, name, bases, attrs):
"""Creates and returns a new `DataObject` class with its declared
fields and name."""
fields = {}
new_fields = {}
new_properties = {}
# Inherit all the parent DataObject classes' fields.
for base in bases:
if isinstance(base, DataObjectMetaclass):
fields.update(base.fields)
# Move all the class's attributes that are Fields to the fields set.
for attrname, field in attrs.items():
if isinstance(field, remoteobjects.fields.Property):
new_properties[attrname] = field
if isinstance(field, remoteobjects.fields.Field):
new_fields[attrname] = field
elif attrname in fields:
# Throw out any parent fields that the subclass defined as
# something other than a Field.
del fields[attrname]
fields.update(new_fields)
attrs['fields'] = fields
obj_cls = super(DataObjectMetaclass, cls).__new__(cls, name, bases, attrs)
for field, value in new_properties.items():
obj_cls.add_to_class(field, value)
# Register the new class so Object fields can have forward-referenced it.
classes_by_name[name] = obj_cls
# Tell this class's fields what this class is, so they can find their
# forward references later.
for field in new_properties.values():
field.of_cls = obj_cls
return obj_cls
def add_to_class(cls, name, value):
try:
value.install(name, cls)
except (NotImplementedError, AttributeError):
setattr(cls, name, value)
class DataObject(object):
"""An object that can be decod
|
ed from or encoded as a dictionary.
DataObject subclasses should be de
|
clared with their different data
attributes defined as instances of fields from the `remoteobjects.fields`
module. For example:
>>> from remoteobjects import dataobject, fields
>>> class Asset(dataobject.DataObject):
... name = fields.Field()
... updated = fields.Datetime()
... author = fields.Object('Author')
...
A DataObject's fields then provide the coding between live DataObject
instances and dictionaries.
"""
__metaclass__ = DataObjectMetaclass
def __init__(self, **kwargs):
"""Initializes a new `DataObject` with the given field values."""
self.api_data = {}
self.__dict__.update(kwargs)
def __eq__(self, other):
"""Returns whether two `DataObject` instances are equivalent.
If the `DataObject` instances are of the same type and contain the
same data in all their fields, the objects are equivalent.
"""
if type(self) != type(other):
return False
for k, v in self.fields.iteritems():
if isinstance(v, remoteobjects.fields.Field):
if getattr(self, k) != getattr(other, k):
return False
return True
def __ne__(self, other):
"""Returns whether two `DataObject` instances are different.
`DataObject` instances are different if they are not equivalent as
determined through `__eq__()`.
"""
return not self == other
@classmethod
def statefields(cls):
return cls.fields.keys() + ['api_data']
def __getstate__(self):
return dict((k, self.__dict__[k]) for k in self.statefields()
if k in self.__dict__)
def get(self, attr, *args):
return getattr(self, attr, *args)
def __iter__(self):
for key in self.fields.keys():
yield key
def to_dict(self):
"""Encodes the DataObject to a dictionary."""
# Start with the last set of data we got from the API
data = deepcopy(self.api_data)
# Now replace the data with what's actually in our object
for field_name, field in self.fields.iteritems():
value = getattr(self, field.attrname, None)
if value is not None:
data[field.api_name] = field.encode(value)
else:
data[field.api_name] = None
# Now delete any fields that ended up being None
# since we should exclude them in the resulting dict.
for k in data.keys():
if data[k] is None:
del data[k]
return data
@classmethod
def from_dict(cls, data):
"""Decodes a dictionary into a new `DataObject` instance."""
self = cls()
self.update_from_dict(data)
return self
def update_from_dict(self, data):
"""Adds the content of a dictionary to this DataObject.
Parameter `data` is the dictionary from which to update the object.
Use this only when receiving newly updated or partial content for a
DataObject; that is, when the data is from the outside data source and
needs decoded through the object's fields. Data from "inside" your
application should be added to an object manually by setting the
object's attributes. Data that constitutes a new object should be
turned into another object with `from_dict()`.
"""
if not isinstance(data, dict):
raise TypeError
# Clear any local instance field data
for k in self.fields.iterkeys():
if k in self.__dict__:
del self.
|
wagtail/wagtail
|
wagtail/images/api/fields.py
|
Python
|
bsd-3-clause
| 1,340
| 0.001493
|
from collections import OrderedDict
from rest_framework.fields import Field
from ..models import SourceImageIOError
class ImageRenditionField(Field):
"""
A field that generates a rendition with the specified filter spec, and serialises
details of that rendition.
Example:
"thumbnail": {
"url": "/media/images/myimage.max-165x165.jpg",
"width": 165,
"height": 100,
"alt": "Image alt text"
}
If there is an error with the source image, the dict will only contain a single
key, "error", indicating this error:
"thumbnail": {
"error": "SourceImageIOError"
}
"""
def __init__(self, filter_spec, *args, **kwargs):
self.filter_spec = filter_spec
super().__init__(*args, **kwargs)
def to_representation(self, image):
try:
|
thumbnail = image.get_rendition(self.filter_spec)
return OrderedDict(
[
|
("url", thumbnail.url),
("width", thumbnail.width),
("height", thumbnail.height),
("alt", thumbnail.alt),
]
)
except SourceImageIOError:
return OrderedDict(
[
("error", "SourceImageIOError"),
]
)
|
openstack/stacktach-shoebox
|
shoebox/disk_storage.py
|
Python
|
apache-2.0
| 6,327
| 0
|
# Copyright (c) 2014 Dark Secret Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
class InvalidVersion(Exception):
pass
class OutOfSync(Exception):
pass
class EndOfFile(Exception):
pass
BOR_MAGIC_NUMBER = 0x69867884
class Version0(object):
# Preamble ... same for all versions.
# i = 0x69867884 (EVNT)
# h = version
def __init__(self):
self.preamble_schema = "ih"
self.preamble_size = struct.calcsize(self.preamble_schema)
def make_preamble(self, version):
return struct.pack(self.pream
|
ble_schema, BOR_MAGIC_NUMBER, version)
def _check_eof(self, expected, actual):
if actual < expected:
raise EndOfFile()
def load_preamble(self, file_handle):
raw = file_handle.read(self.preamble_size)
self._check_eof(self.preamble_size, len(raw))
header = struct.unpack(self.preamble_schema, raw)
if header[0] != BOR_MAGIC_NUMBER:
raise OutOfSync("Expected Begin
|
ning of Record marker")
return header[1]
class Version1(Version0):
# Version 1 SCHEMA
# ----------------
# i = metadata block length
# i = raw notification block length
# i = 0x00000000 EOR
# Metadata dict block
# i = number of strings (N) - key/value = 2 strings
# N * i = length of key followed by length of value
# N * (*s) = key followed by value
# Raw notification block
# i = length of raw data block
# *s = raw data
# EXAMPLE
# --------
# With above Event and Metadata
#
# Header schema: "iii"
# Metadata length: 119
# Raw notification length: 201
# Metadata = 6 strings (3 key-value pairs)
# Metadata schema: "iiiiiii6s14s10s31s10s20s"
# ------ key/value
# ------ key/value
# ----- key/value
# ------ length of the 6 strings
# - 12 entries (6 string sizes + 6 strings)
# Raw notification: "i197s"
# ---- json notification
# - 197
def __init__(self):
super(Version1, self).__init__()
self.header_schema = "iii"
self.header_size = struct.calcsize(self.header_schema)
def _encode(self, s):
if isinstance(s, unicode):
return s.encode('utf-8')
return s
def pack(self, notification, metadata):
nsize = len(notification)
raw_block_schema = "i%ds" % nsize
raw_block = struct.pack(raw_block_schema, nsize, notification)
metadata_items = ["i"] # appended with N "%ds"'s
metadata_values = [len(metadata) * 4] # [n]=key, [n+1]=value
for key, value in metadata.iteritems():
key = self._encode(key)
value = self._encode(value)
metadata_items.append("i")
metadata_items.append("i")
metadata_values.append(len(key))
metadata_values.append(len(value))
for key, value in metadata.iteritems():
key = self._encode(key)
value = self._encode(value)
metadata_items.append("%ds" % len(key))
metadata_values.append(key)
metadata_items.append("%ds" % len(value))
metadata_values.append(value)
metadata_schema = "".join(metadata_items)
metadata = struct.pack(metadata_schema, *metadata_values)
header = struct.pack(self.header_schema,
struct.calcsize(metadata_schema),
struct.calcsize(raw_block_schema), 0)
preamble = self.make_preamble(1)
return (preamble, header, metadata, raw_block)
def unpack(self, file_handle):
header_bytes = file_handle.read(self.header_size)
self._check_eof(self.header_size, len(header_bytes))
header = struct.unpack(self.header_schema, header_bytes)
if header[2] != 0:
raise OutOfSync("Didn't find 0 EOR marker.")
metadata_bytes = file_handle.read(header[0])
self._check_eof(header[0], len(metadata_bytes))
num_strings = struct.unpack_from("i", metadata_bytes)
offset = struct.calcsize("i")
lengths = num_strings[0] / 2
lengths_schema = "i" * lengths
key_value_sizes = struct.unpack_from(lengths_schema, metadata_bytes,
offset=offset)
key_value_schema_list = ["%ds" % sz for sz in key_value_sizes]
key_value_schema = "".join(key_value_schema_list)
offset += struct.calcsize(lengths_schema)
key_values = struct.unpack_from(key_value_schema, metadata_bytes,
offset=offset)
metadata = dict((key_values[n], key_values[n + 1])
for n in range(len(key_values))[::2])
raw = file_handle.read(header[1])
self._check_eof(header[1], len(raw))
raw_len = struct.unpack_from("i", raw)
offset = struct.calcsize("i")
jnot = struct.unpack_from("%ds" % raw_len[0], raw, offset=offset)
return (metadata, jnot[0])
VERSIONS = {1: Version1()}
CURRENT_VERSION = 1
def get_version_handler(version=CURRENT_VERSION):
global VERSIONS
version_handler = VERSIONS.get(version)
if not version_handler:
raise InvalidVersion()
return version_handler
def pack_notification(notification, metadata, version=CURRENT_VERSION):
version_handler = get_version_handler(version)
return version_handler.pack(notification, metadata)
def unpack_notification(file_handle):
v0 = Version0()
version = v0.load_preamble(file_handle)
version_handler = get_version_handler(version)
return version_handler.unpack(file_handle)
|
uw-dims/tupelo
|
docs/source/conf.py
|
Python
|
bsd-3-clause
| 10,490
| 0.006292
|
# -*- coding: utf-8 -*-
#
# Tupelo documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 9 09:29:36 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
|
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from sphinx import __version__
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.intersphinx',
'sphinx.ext.graphviz',
'sphinx.ext.ifconfig',
]
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Tupelo'
copyright = u'2015, University of Washington'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.2.11'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'UW-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'UW-logo-32x32.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tupelodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# The following comes from
# https://github.com/rtfd/readthedocs.org/issues/416
#
'preamble': "".join((
'\DeclareUnicodeCharacter{00A0}{ }', # NO-BREAK SPACE
'\DeclareUnicodeCharacter{251C}{+}', # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\DeclareUnicodeCharacter{2514}{+}', # BOX DRAWINGS LIGHT UP AND RIGHT
)),
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Tupelo.tex', u'Tupelo Documentation',
u'Stuart Maclean', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'UW-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
|
syci/domsense-agilebg-addons
|
account_followup_choose_payment/__init__.py
|
Python
|
gpl-2.0
| 1,092
| 0.000916
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Agile Business Group sagl (<http://www.agilebg.com>)
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import followup
import account_move_line
import account_followup
|
pycontw/pycontw2016
|
src/sponsors/migrations/0006_sponsor_conference.py
|
Python
|
mit
| 579
| 0.001727
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-05-02 15:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0005_auto_20160530_1255'),
]
operations = [
migrations.AddField(
model_name='sponsor',
name='conference',
            field=models.SlugField(choices=[('pycontw-2016', 'PyCon Taiwan 2016'), ('pycontw-2017', 'PyCon Taiwan 2017')], default='pycontw-2017', verbose_name='conference'),
),
]
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/libhio/package.py
|
Python
|
lgpl-2.1
| 2,974
| 0.001009
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libhio(AutotoolsPackage):
"""libHIO is a flexible, high-performance parallel IO package developed
at LANL. libHIO supports IO to either a conventional PFS or to Cray
DataWarp with management of Cray DataWarp space and stage-in and
stage-out from and to the PFS.
"""
homepage = "https://github.com/hpc/libhio"
url = "https://github.com/hpc/libhio/releases/download/hio.1.4.1.0/libhio-1.4.1.0.tar.bz2"
#
# We don't include older versions since they are missing features
# needed by current and future consumers of libhio
#
version('1.4.1.0', '6ef566fd8cf31fdcd05fab01dd3fae44')
#
    # main users of libhio through Spack will want to use the HDF5 plugin,
    # so make the hdf5 variant a default
#
variant('hdf5', default=True, description='Enable HDF5 support')
depends_on("json-c")
depends_on("bzip2")
depends_on("pkgconfig", type="build")
depends_on('mpi')
#
# libhio depends on hdf5+mpi if hdf5 is being used since it
# autodetects the presence of an MPI and/or uses mpicc by default to build
|
depends_on('hdf5+mpi', when='+hdf5')
#
# wow, we need to patch libhio
#
patch('0001-configury-fix-a-problem-with-bz2-configury.patch', when="@1.4.1.0")
patch('0001-hdf5-make-docs-optional.patch', when="@1.4.1.0")
def autoreconf(self, spec, prefix):
autoreconf = which('autoreconf')
autoreconf('-ifv')
def configure_args(self):
spec = self.spec
args = []
args.append('--with-external_bz2={0}'.format(spec['bzip2'].prefix))
if '+hdf5' in spec:
args.append('--with-hdf5={0}'.format(spec['hdf5'].prefix))
return args
|
dcf21/4most-4gp
|
src/pythonModules/fourgp_speclib/fourgp_speclib/tests/test_spectrum_library_sql.py
|
Python
|
mit
| 7,484
| 0.004009
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Unit tests for all SQL implementations of spectrum libraries.
"""
from os import path as os_path
import uuid
import unittest
import numpy as np
import fourgp_speclib
class TestSpectrumLibrarySQL(object):
"""
This class is a mixin which adds lots of standard tests to any SQL-based SpectrumLibrary unittest class.
"""
def test_refresh(self):
"""
Check that we can refresh database connection.
"""
self._lib.refresh_database()
def test_spectrum_retrieval(self):
"""
        Check that we can store a single spectrum into the SpectrumLibrary and retrieve it again.
"""
# Create a random spectrum to insert into the spectrum library
size = 50
raster = np.arange(size)
values = np.random.random(size)
value_errors = np.random.random(size)
input_spectrum = fourgp_speclib.Spectrum(wavelengths=raster,
values=values,
value_errors=value_errors,
metadata={"origin": "unit-test"})
# Insert it into the spectrum library
self._lib.insert(input_spectrum, "dummy_filename")
# Load it back as a SpectrumArray
my_spectra = self._lib.search()
my_spectrum_array = self._lib.open(filenames=my_spectra[0]['filename'])
# Pick spectrum out of SpectrumArray
my_spectrum = my_spectrum_array.extract_item(0)
# Check that we got back the same spectrum we put in
self.assertEqual(my_spectrum, input_spectrum)
def test_search_illegal_metadata(self):
"""
        Check that searching on a metadata field which doesn't exist raises an error.
        """
        # Insert a random spectrum into the SpectrumLibrary
size = 50
input_spectrum = fourgp_speclib.Spectrum(wavelengths=np.arange(size),
values=np.random.random(size),
value_errors=np.random.random(size),
metadata={"origin": "unit-test"})
self._lib.insert(input_spectrum, "dummy_filename")
# Search on an item of metadata which doesn't exist
with self.assertRaises(AssertionError):
self._lib.search(x_value=23)
def test_search_1d_numerical_range(self):
"""
Check that we can search for spectra on a simple metadata numerical range constraint.
"""
# Insert ten random spectra into SpectrumLibrary
size = 50
x_values = list(range(10))
for x in x_values:
input_spectrum = fourgp_speclib.Spectrum(wavelengths=np.arange(size),
values=np.random.random(size),
value_errors=np.random.random(size),
metadata={"origin": "unit-test",
"x_value": x})
self._lib.insert(input_spectrum, "x_{}".format(x))
# Search for spectra with x in a defined range
x_range = [4.5, 8.5]
x_values_expected = [x for x in x_values if (x > x_range[0] and x < x_range[1])]
my_spectra = self._lib.search(x_value=x_range)
ids = [str(item["specId"]) for item in my_spectra]
metadata = self._lib.get_metadata(ids=ids)
x_values = [item['x_value'] for item in metadata]
# Check that we got back the same spectrum we put in
self.assertEqual(x_values, x_values_expected)
def test_search_1d_numerical_value(self):
"""
Check that we can search for spectra on a simple metadata numerical point-value constraint.
"""
# Insert ten random spectra into SpectrumLibrary
size = 50
x_values = list(range(10))
for x in x_values:
input_spectrum = fourgp_speclib.Spectrum(wavelengths=np.arange(size),
values=np.random.random(size),
value_errors=np.random.random(size),
metadata={"origin": "unit-test",
"x_value": x})
self._lib.insert(input_spectrum, "x_{}".format(x))
# Search for spectra with matching x_value
my_spectra = self._lib.search(x_value=5)
ids = [str(item["specId"]) for item in my_spectra]
metadata = self._lib.get_metadata(ids=ids)
x_values = [item['x_value'] for item in metadata]
# Check that we got back the same spectrum we put in
self.assertEqual(x_values, [5])
def test_search_1d_string_range(self):
"""
Check that we can search for spectra on a simple metadata string range constraint.
"""
# Insert random spectra into SpectrumLibrary
alphabet = "abcdefghijklmnopqrstuvwxyz"
size = 50
x_values = list(range(12))
|
for x in x_values:
input_spectrum = fourgp_speclib.Spectrum(wavelengths=np.arange(size),
values=np.random.random(size),
|
value_errors=np.random.random(size),
metadata={"origin": "unit-test",
"x_value": alphabet[x:x + 3]})
self._lib.insert(input_spectrum, "x_{}".format(x))
# Search for spectra with x in a defined range
my_spectra = self._lib.search(x_value=["dxx", "h"])
x_values_expected = ["efg", "fgh", "ghi"]
filenames_got = [str(item["filename"]) for item in my_spectra]
x_values_got = [str(i["x_value"]) for i in self._lib.get_metadata(filenames=filenames_got)]
x_values_got.sort()
# Check that we got back the same spectrum we put in
self.assertEqual(x_values_expected, x_values_got)
def test_search_1d_string_value(self):
"""
Check that we can search for spectra on a simple metadata string point-value constraint.
"""
# Insert random spectra into SpectrumLibrary
alphabet = "abcdefghijklmnopqrstuvwxyz"
size = 50
x_values = list(range(10))
for x in x_values:
input_spectrum = fourgp_speclib.Spectrum(wavelengths=np.arange(size),
values=np.random.random(size),
value_errors=np.random.random(size),
metadata={"origin": "unit-test",
"x_value": alphabet[x:x + 3]})
self._lib.insert(input_spectrum, "x_{}".format(x))
# Search for spectra with matching x_value
my_spectra = self._lib.search(x_value="def")
filenames_got = [str(item["filename"]) for item in my_spectra]
x_values_got = [str(i["x_value"]) for i in self._lib.get_metadata(filenames=filenames_got)]
x_values_got.sort()
# Check that we got back the same spectrum we put in
self.assertEqual(x_values_got, ["def"])
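# An illustrative concrete test case (names here are hypothetical; the actual
# SQL-backed SpectrumLibrary subclass and its constructor arguments live
# elsewhere in fourgp_speclib). The mixin only assumes that setUp() stores a
# library instance in self._lib:
#
#     class TestSpectrumLibrarySqlite(TestSpectrumLibrarySQL, unittest.TestCase):
#         def setUp(self):
#             self._lib = fourgp_speclib.SpectrumLibrarySqlite(
#                 path="/tmp/speclib_unit_test", create=True)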
|
tseaver/google-cloud-python
|
tasks/google/cloud/tasks_v2beta2/gapic/cloud_tasks_client.py
|
Python
|
apache-2.0
| 93,412
| 0.002516
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.tasks.v2beta2 CloudTasks API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.cloud.tasks_v2beta2.gapic import cloud_tasks_client_config
from google.cloud.tasks_v2beta2.gapic import enums
from google.cloud.tasks_v2beta2.gapic.transports import cloud_tasks_grpc_transport
from google.cloud.tasks_v2beta2.proto import cloudtasks_pb2
from google.cloud.tasks_v2beta2.proto import cloudtasks_pb2_grpc
from google.cloud.tasks_v2beta2.proto import queue_pb2
from google.cloud.tasks_v2beta2.proto import task_pb2
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import options_pb2
from google.iam.v1 import policy_pb2
from google.protobuf import duration_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import timestamp_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-tasks").version
class CloudTasksClient(object):
"""
Cloud Tasks allows developers to manage the execution of background
work in their applications.
"""
SERVICE_ADDRESS = "cloudtasks.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.tasks.v2beta2.CloudTasks"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CloudTasksClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
    from_service_account_json = from_service_account_file
@classmethod
def location_path(cls, project, location):
"""Return a fully-qualified location string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}",
project=project,
location=location,
|
)
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
"projects/{project}", project=project
)
@classmethod
def queue_path(cls, project, location, queue):
"""Return a fully-qualified queue string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/queues/{queue}",
project=project,
location=location,
queue=queue,
)
@classmethod
def task_path(cls, project, location, queue, task):
"""Return a fully-qualified task string."""
return google.api_core.path_template.expand(
"projects/{project}/locations/{location}/queues/{queue}/tasks/{task}",
project=project,
location=location,
queue=queue,
task=task,
)
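    # Example (illustrative values): CloudTasksClient.queue_path("my-project",
    # "us-central1", "default") expands to
    # "projects/my-project/locations/us-central1/queues/default"; the other
    # path helpers above follow the same pattern.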
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.CloudTasksGrpcTransport,
Callable[[~.Credentials, type], ~.CloudTasksGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = cloud_tasks_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=cloud_tasks_grpc_transport.CloudTasksGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these
|
DevilSeven7/PDFManager
|
PDFManager/UI.py
|
Python
|
gpl-2.0
| 8,592
| 0.018506
|
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
from PDFManager.PDFMangerFacade import PDFMangerFacade
class PDFManager_UI:
def __init__(self):
        self.i = -1
self.files=[]
self.root = Tk()
self.root.title('PDFManager')
        self.root.wm_iconbitmap("ico.ico")  # icon
self.frame = Frame(self.root,height=2,bd=2,relief=SUNKEN,bg='black',)
        self.root.resizable(False, False)  # disable window resizing
        # center the window on the screen
        larghezza = self.root.winfo_screenwidth()  # screen width in pixels
        altezza = self.root.winfo_screenheight()  # screen height in pixels
WIDTH = self.root.winfo_reqwidth()
HEIGHT = self.root.winfo_reqheight()
x = larghezza//2 - WIDTH
y = altezza//2 - HEIGHT
self.root.geometry("%dx%d+%d+%d" % (421,342 , x, y))
self.button_merge = Button(self.root, text = 'Unisci', command=self.__unisci__)
self.button_stitching = Button(self.root,text = 'Dividi',command=self.dividi)
self.button_split = Button(self.root, text = 'Fusione', command=self.__fusione__)
self.button_watermark = Button(self.root, text = 'Filigrana', command=self.__filigrana__)
self.button_encript = Button(self.root, text = 'Cripta', command=self.__cripta__)
self.button_rotate = Button(self.root, text='Ruota', command=self.__ruota__)
self.button_clear =Button(self.root, text='Rimuovi tutto', command=self.__svuota__)
self.password = Entry(self.root)
self.combo_rotate = ttk.Combobox(self.root,state='readonly')
self.combo_rotate['values'] = (0,90,180,270)
lblPass = Label(self.root,text='Password :',anchor=E)
lblGradi = Label(self.root,text='Gradi :',anchor=E)
self.button_add = Button(self.root, text='Aggiungi PDF', command=self.__aggiungi__)
self.button_delete = Button(self.root, text='Rimuovi selezionato', command=self.__rimuovi__)
self.list_file = ttk.Treeview(self.root)
self.list_file['columns'] =('NumeroPagine')
self.list_file.heading("#0",text='NomeFile')
self.list_file.column('#0',anchor=W)
self.list_file.heading('NumeroPagine',text = 'Numero pagine')
self.list_file.column('NumeroPagine',anchor='center',width=100)
self.button_add.grid(row=0, column= 0,columnspan=2,sticky=(W,E))
self.button_delete.grid(row=1,column=0,columnspan=2,sticky=(W,E))
self.button_clear.grid(row = 2,column=0,columnspan=2,sticky=(W,E))
self.list_file.grid(row=0,column=2,columnspan=3,rowspan=3)
self.frame.grid(row=3,column=0,columnspan=5,sticky=(W,E),pady=5)
self.button_merge.grid(row=4,column=0,columnspan=2,sticky=(W,E))
self.button_stitching.grid(row=4,column=3,columnspan=2,sticky=(W,E))
self.button_split.grid(row=5,column=0,columnspan=2,sticky=(W,E))
self.button_watermark.grid(row=5,column=3,columnspan=2,sticky=(W,E))
self.button_encript.grid(row=6,column=0,columnspan=2,sticky=(W,E))
lblPass.grid(row=6,column=2)
self.password.grid(row=6,column=3,columnspan=2,sticky=(W,E))
self.button_rotate.grid(row=7,column=0,columnspan=2,sticky=(W,E))
lblGradi.grid(row=7,column=2)
self.combo_rotate.grid(row=7,column=3,columnspan=2,sticky=(W,E))
self.button_stitching.config(state=DISABLED)
self.button_encript.config(state=DISABLED)
self.button_watermark.config(state=DISABLED)
self.button_merge.config(state=DISABLED)
self.button_split.config(state=DISABLED)
self.button_rotate.config(state=DISABLED)
def __aggiungi__(self):
filelist = filedialog.askopenfilenames(filetypes=[("PDF file",".pdf")])
for file in filelist:
if(file in self.files):
continue
self.i = self.i+1
self.files.append(file)
split = file.split("/").pop()
self.list_file.insert("",self.i,text=split,values=(PDFMangerFacade.pagescount(file)))
self.__controlla__()
def __rimuovi__(self):
try:
pos = self.list_file.selection()[0]
posizione = self.list_file.index(pos)
del(self.files[posizione])
self.list_file.delete(pos)
self.i= self.i-1
print(self.files)
|
except IndexError:
messagebox.showwarning("Attenzione","Nessun elemento selezionato")
self.__controlla__()
def __unisci__(self):
try:
name = filedialog.asksaveasfilename(filetypes=[("PDF file",".pdf")])
            if not name.endswith('.pdf'):
name = name+'.pdf'
PDFMangerFacade.merge(*self.files, filenameOut=name)
        except Exception as e:
messagebox.showwarning("Attenzione",e)
def __svuota__(self):
self.files = []
self.list_file.delete(*self.list_file.get_children())
def dividi(self):
try:
pos = self.list_file.selection()[0]
posizione = self.list_file.index(pos)
phat = filedialog.askdirectory()
prefisso = (self.files[posizione].split("/").pop()).split('.')[0]
PDFMangerFacade.stitching(self.files[posizione], phat + '/' + prefisso)
except IndexError:
messagebox.showwarning("Attenzione","Elemento non selezionato")
def __fusione__(self):
try:
name = filedialog.asksaveasfilename(filetypes=[("PDF file",".pdf")])
PDFMangerFacade.splitting(*self.files,filenameOut = name)
except IndexError as e:
messagebox.showwarning("Attenzione",e)
def __filigrana__(self):
try:
pos = self.list_file.selection()[0]
posizione = self.list_file.index(pos)
print(self.files[posizione])
name_filigrana = filedialog.askopenfilename(filetypes=[("PDF file",".pdf")])
name = filedialog.asksaveasfilename(filetypes=[("PDF file",".pdf")])
PDFMangerFacade.watermark(self.files[posizione], name_filigrana, name)
except IndexError:
messagebox.showwarning("Attenzione","Elemento non selezionato.")
def __cripta__(self):
try:
pos = self.list_file.selection()[0]
posizione = self.list_file.index(pos)
password = self.password.get()
if(password == ""):
messagebox.showwarning("Attenzione","Inserire una password.")
return
name = filedialog.asksaveasfilename(filetypes=[("PDF file",".pdf")])
PDFMangerFacade.encrypt(self.files[posizione], password, name);
self.password.delete(0,'end')
except IndexError:
messagebox.showwarning("Attenzione","Elemento non selezionato.")
def __ruota__(self):
try:
pos = self.list_file.selection()[0]
posizione = self.list_file.index(pos)
gradi = int(self.combo_rotate.get())
name = filedialog.asksaveasfilename(filetypes=[("PDF file",".pdf")])
PDFMangerFacade.rotatePage(self.files[posizione],name,gradi);
except IndexError:
messagebox.showwarning("Attenzione","Elemento non selezionato.")
except ValueError:
messagebox.showwarning("Attenzione","Selezionare il grado di rotazione.")
def start(self):
self.root.mainloop()
def __controlla__(self):
if((self.i+1) == 0):
self.button_stitching.config(state=DISABLED)
self.button_encript.config(state=DISABLED)
self.button_watermark.config(state=DISABLED)
self.button_merge.config(state=DISABLED)
self.button_split.config(state=DISABLED)
self.button_rotate.config(state=DISABLED)
if((self.i+1) ==1):
self.button_stitching.config(state=NORMAL)
self.button_encript.config(state=NORMAL)
self.button_watermark.config(state=NORMAL)
self.button_merge.config(state=DISABLED)
self.button_split.config
|
bendk/thesquirrel
|
events/tests/test_forms.py
|
Python
|
agpl-3.0
| 7,189
| 0.000556
|
# thesquirrel.org
#
# Copyright (C) 2015 Flying Squirrel Community Space
#
# thesquirrel.org is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# thesquirrel.org is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
# License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with thesquirrel.org. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from datetime import date, time
import mock
from django.test import TestCase
from thesquirrel.factories import *
from ..factories import *
from ..forms import (EventForm, EventRepeatForm, EventRepeatExcludeForm,
CompositeEventForm)
from ..models import EventRepeat, EventRepeatExclude
class EventFormTest(TestCase):
def test_start_date_must_be_after_end_date(self):
form = EventForm(data={
'title': 'test-title',
'description': 'test-description',
'date': '1/1/2015',
'start_time': '18:30',
'end_time': '16:30',
})
assert not form.is_valid()
def test_save(self):
form = EventForm(data={
'title': 'test-title',
'description': 'test-description',
'location': 'Library',
'bottomliner': 'Santa',
'date': '1/1/2015',
'start_time': '18:30',
'end_time': '19:30',
})
assert form.is_valid()
form.save()
class EventRepeatFormTest(TestCase):
def make_form(self, update=False, number=1):
if update:
event = EventFactory(with_repeat=True)
instance = event.repeat_set.all().get()
else:
event = EventFactory()
instance = None
return EventRepeatForm(number, instance=instance)
def make_form_with_data(self, update=False, no_days=False,
empty_type=False, number=1):
if update:
event = EventFactory(with_repeat=True)
instance = event.repeat_set.all().get()
else:
|
event = EventFactory()
instance = None
data = {
'type': '1M' if not empty_type else '',
'start_date': '1/1/2015',
'we': True if not no_days else False,
'end_date': '2/1/2015',
'start_time': '16:30',
'end_time': '18:30',
}
return EventRepeatForm(number, instance=instance, data=data)
    def test_save(self):
form = self.make_form_with_data()
assert form.is_valid()
event = EventFactory()
repeat = form.save(event)
assert repeat.event == event
def test_one_weekday_required(self):
form = self.make_form_with_data(no_days=True)
assert not form.is_valid()
def test_empty_type_doesnt_create_new(self):
form = self.make_form_with_data(empty_type=True)
assert form.is_valid()
event = EventFactory()
form.save(event)
assert not event.repeat_set.all().exists()
def test_empty_type_deletes_existing(self):
form = self.make_form_with_data(update=True, empty_type=True)
assert form.is_valid()
event = EventFactory()
form.save(event)
assert not event.repeat_set.all().exists()
def check_empty_type_label(self, form, correct_label):
empty_type_label = None
for value, label in form.fields['type'].choices:
if value == '':
empty_type_label = label
break
assert empty_type_label is not None
assert empty_type_label == correct_label
def test_empty_type_labels(self):
form = self.make_form()
self.check_empty_type_label(self.make_form(), u'No repeat')
self.check_empty_type_label(self.make_form(update=True),
u'Delete repeat')
def test_headings(self):
assert self.make_form().heading == 'Repeat'
assert self.make_form(number=2).heading == 'Repeat #2'
class EventRepeatExcludeFormTest(TestCase):
def test_create_excludes(self):
event = EventFactory(with_repeat=True, with_exclude=True)
form = EventRepeatExcludeForm(data={
'dates': ['2/4/2015', '2/5/2015'],
})
assert form.is_valid()
form.save(event)
def test_invalid_value(self):
form = EventRepeatExcludeForm(data={
'dates': ['invalid-date'],
})
assert not form.is_valid()
class CompositeEventFormTest(TestCase):
def test_initial_excludes(self):
event = EventFactory(with_repeat=True, with_exclude=True)
form = CompositeEventForm(event)
assert form.exclude_form.initial['dates'] == [
e.date for e in event.excludes.all()
]
def mock_out_subforms(self, composite_form):
def mock_subform():
return mock.Mock(
is_valid=mock.Mock(return_value=True),
)
composite_form.event_form = mock_subform()
composite_form.exclude_form = mock_subform()
for i in range(len(composite_form.repeat_forms)):
composite_form.repeat_forms[i] = mock_subform()
for i in range(len(composite_form.update_repeat_forms)):
composite_form.update_repeat_forms[i] = mock_subform()
return composite_form
def test_is_valid(self):
event = EventFactory(with_repeat=True)
form = self.mock_out_subforms(CompositeEventForm(event))
assert form.is_valid()
assert form.event_form.is_valid.called
for repeat_form in form.repeat_forms:
assert repeat_form.is_valid.called
for update_form in form.update_repeat_forms:
assert update_form.is_valid.called
def test_is_valid_return_false(self):
event = EventFactory(with_repeat=True)
form = self.mock_out_subforms(CompositeEventForm(event))
form.event_form.is_valid.return_value = False
assert not form.is_valid()
# Even though event_form.is_valid() returns False, we should still
# call is_valid for each subform so that the ErrorDict is generated.
assert form.event_form.is_valid.called
for repeat_form in form.repeat_forms:
assert repeat_form.is_valid.called
for update_form in form.update_repeat_forms:
assert update_form.is_valid.called
def test_save(self):
event = EventFactory(with_repeat=True)
form = self.mock_out_subforms(CompositeEventForm(event))
saved_event = form.event_form.save.return_value
assert form.save() == saved_event
for repeat_form in form.repeat_forms:
assert repeat_form.save.call_args
for update_form in form.update_repeat_forms:
assert update_form.save.call_args
|
openwisp/django-x509
|
tests/manage.py
|
Python
|
bsd-3-clause
| 252
| 0
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'openwisp2.settings')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-pickleshare/package.py
|
Python
|
lgpl-2.1
| 764
| 0.003927
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPickleshare(PythonPackage):
"""Tiny 'shelve'-like database with concurrency
|
support"""
pypi = "pickleshare/pickleshare-0.7.4.tar.gz"
version('0.7.5', sha256='87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca')
version('0.7.4', sha256='84a9257227dfdd6fe1b4be1319096c20eb85ff1e82c7932f36efccfe1b09737b')
depends_on('python@2.7:2.8,3:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-pathlib2', type=('build', 'run'), when='^python@2.6:2.8,3.2:3.3')
|
sbrichards/rockstor-core
|
src/rockstor/smart_manager/data_collector.py
|
Python
|
gpl-3.0
| 15,176
| 0.00112
|
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
Extracting data from procfs
"""
import re
from multiprocessing import Process
import time
import os
from datetime import datetime
from django.utils.timezone import utc
from django.db import transaction
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from smart_manager.models import (CPUMetric, LoadAvg, MemInfo, PoolUsage,
DiskStat, ShareUsage, NetStat, ServiceStatus)
from storageadmin.models import (Disk, Pool, Share, Snapshot, NetworkInterface)
from fs.btrfs import pool_usage, shares_usage
import logging
logger = logging.getLogger(__name__)
class ProcRetreiver(Process):
    def __init__(self):
self.ppid = os.getpid()
self.sleep_time = 1
self._num_ts_records = 0
super(ProcRetreiver, self).__init__()
def _save_wrapper(self, ro):
ro.save()
self._num_ts_records = self._num_ts_records + 1
def _truncate_ts_data(self, max_records=settings.MAX_TS_RECORDS):
"""
cleanup ts tables: CPUMetric, LoadAvg, MemInfo, PoolUsage,
|
DiskStat and ShareUsage, ServiceStatus
Discard all records older than last max_records.
"""
ts_models = (CPUMetric, LoadAvg, MemInfo, PoolUsage, DiskStat,
ShareUsage, ServiceStatus)
try:
for m in ts_models:
try:
latest_id = m.objects.latest('id').id
except ObjectDoesNotExist, e:
msg = ('Unable to get latest id for the model: %s. '
'Moving on' % (m.__name__))
logger.error(msg)
continue
m.objects.filter(id__lt=latest_id-max_records).delete()
except Exception, e:
logger.error('Unable to truncate time series data')
logger.exception(e)
raise e
def run(self):
# extract metrics and put in q
pu_time = time.mktime(time.gmtime())
loadavg_time = pu_time
cur_disk_stats = None
cur_net_stats = None
cur_cpu_stats = {}
try:
self._truncate_ts_data()
while (True):
if (os.getppid() != self.ppid):
msg = ('Parent process(smd) exited. I am exiting too.')
return logger.error(msg)
if (self._num_ts_records > (settings.MAX_TS_RECORDS *
settings.MAX_TS_MULTIPLIER)):
self._truncate_ts_data()
self._num_ts_records = 0
with transaction.atomic(using='smart_manager'):
cur_cpu_stats = self.cpu_stats(cur_cpu_stats)
loadavg_time = self.loadavg(loadavg_time)
self.meminfo()
pu_time = self.pools_usage(pu_time)
cur_disk_stats = self.disk_stats(cur_disk_stats,
self.sleep_time)
cur_net_stats = self.network_stats(cur_net_stats,
self.sleep_time)
time.sleep(self.sleep_time)
except Exception, e:
logger.error('unhandled exception in %s. Exiting' % self.name)
logger.exception(e)
raise e
def cpu_stats(self, prev_stats):
stats_file = '/proc/stat'
cur_stats = {}
with open(stats_file) as sfo:
ts = datetime.utcnow().replace(tzinfo=utc)
for line in sfo.readlines():
if (re.match('cpu\d', line) is not None):
fields = line.split()
fields[1:] = map(int, fields[1:])
cm = None
if (fields[0] not in prev_stats):
cm = CPUMetric(name=fields[0], umode=fields[1],
umode_nice=fields[2], smode=fields[3],
idle=fields[4], ts=ts)
else:
prev = prev_stats[fields[0]]
cm = CPUMetric(name=fields[0], umode=fields[1]-prev[1],
umode_nice=fields[2]-prev[2],
smode=fields[3]-prev[3],
idle=fields[4]-prev[4], ts=ts)
cur_stats[fields[0]] = fields
self._save_wrapper(cm)
return cur_stats
def disk_stats(self, prev_stats, interval):
stats_file = '/proc/diskstats'
cur_stats = {}
disks = [d.name for d in Disk.objects.all()]
with open(stats_file) as sfo:
for line in sfo.readlines():
fields = line.split()
if (fields[2] not in disks):
continue
cur_stats[fields[2]] = fields[2:]
if (isinstance(prev_stats, dict)):
ts = datetime.utcnow().replace(tzinfo=utc)
for disk in cur_stats.keys():
if (disk in prev_stats):
prev = prev_stats[disk]
cur = cur_stats[disk]
data = []
for i in range(1, len(prev)):
if (i == 9):
# special case for pending ios
# just take average
avg_ios = (float(cur[i]) + float(prev[i]))/2
data.append(avg_ios)
continue
datum = None
if (cur[i] < prev[i]):
datum = float(cur[i])/interval
else:
datum = (float(cur[i]) - float(prev[i]))/interval
data.append(datum)
ds = DiskStat(name=disk, reads_completed=data[0],
reads_merged=data[1],
sectors_read=data[2],
ms_reading=data[3],
writes_completed=data[4],
writes_merged=data[5],
sectors_written=data[6],
ms_writing=data[7],
ios_progress=data[8],
ms_ios=data[9],
weighted_ios=data[10],
ts=ts)
self._save_wrapper(ds)
return cur_stats
def loadavg(self, last_ts):
now = time.mktime(time.gmtime())
if (now - last_ts < 30):
return last_ts
stats_file = '/proc/loadavg'
with open(stats_file) as sfo, open('/proc/uptime') as ufo:
line = sfo.readline()
fields = line.split()
thread_fields = fields[3].split('/')
idle_seconds = int(float(ufo.readline().split()[1]))
ts = datetime.utcnow().replace(tzinfo=utc)
la = LoadAvg(load_1=fields[0], load_5=fields[1], load_15=fields[2],
active_threads=thread_fields[0],
total_threads=thread_fields[1], latest_pid=fields[4],
idle_seconds=idle_seconds, ts=ts)
self._save_wrapper(la)
return now
def meminfo(self):
stats_file = '/proc/mem
|
benspaulding/django-epio-example
|
example/apps/things/admin.py
|
Python
|
bsd-3-clause
| 661
| 0
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from example.apps.things.models import Thing
class ThingAdmin(admin.ModelAdmin):
fieldsets = (
(None, {
'fields': ('name', 'slug', 'image', 'description'),
}),
        (_(u'Dates'), {
'fields': ('created', 'modified'),
'classes': ('collapse', ),
}),
)
list_display = ('name', 'slug')
list_filter = ('created', 'modified')
prepopulated_fields = {'slug': ('name', )}
readonly_fields = ('created', 'modified')
search_fields = ('name', 'slug')
admin.site.register(Thing, ThingAdmin)
|
JohnLZeller/dd-agent
|
checks.d/docker.py
|
Python
|
bsd-3-clause
| 18,138
| 0.003418
|
# stdlib
import urllib2
import urllib
import httplib
import socket
import os
import re
import time
from urlparse import urlsplit
from util import json
from collections import defaultdict
# project
from checks import AgentCheck
from config import _is_affirmative
EVENT_TYPE = SOURCE_TYPE_NAME = 'docker'
CGROUP_METRICS = [
{
"cgroup": "memory",
"file": "memory.stat",
"metrics": {
# Default metrics
"cache": ("docker.mem.cache", "gauge", True),
"rss": ("docker.mem.rss", "gauge", True),
"swap": ("docker.mem.swap", "gauge", True),
# Optional metrics
"active_anon": ("docker.mem.active_anon", "gauge", False),
"active_file": ("docker.mem.active_file", "gauge", False),
"inactive_anon": ("docker.mem.inactive_anon", "gauge", False),
"inactive_file": ("docker.mem.inactive_file", "gauge", False),
"mapped_file": ("docker.mem.mapped_file", "gauge", False),
"pgfault": ("docker.mem.pgfault", "rate", False),
"pgmajfault": ("docker.mem.pgmajfault", "rate", False),
"pgpgin": ("docker.mem.pgpgin", "rate", False),
"pgpgout": ("docker.mem.pgpgout", "rate", False),
"unevictable": ("docker.mem.unevictable", "gauge", False),
}
},
{
"cgroup": "cpuacct",
"file": "cpuacct.stat",
"metrics": {
"user": ("docker.cpu.user", "rate", True),
"system": ("docker.cpu.system", "rate", True),
},
},
]
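# Each entry above maps a key in the cgroup stat file to a tuple of
# (metric name, submission type, collected by default); the optional metrics
# are only reported when "collect_all_metrics" is enabled on the instance.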
DOCKER_METRICS = {
"SizeRw": ("docker.disk.size", "gauge"),
}
DOCKER_TAGS = [
"Command",
"Image",
]
NEW_TAGS_MAP = {
"name": "container_name",
"image": "docker_image",
"command": "container_command",
}
DEFAULT_SOCKET_TIMEOUT = 5
class UnixHTTPConnection(httplib.HTTPConnection):
"""Class used in conjuction with UnixSocketHandler to make urllib2
compatible with Unix sockets."""
socket_timeout = DEFAULT_SOCKET_TIMEOUT
def __init__(self, unix_socket):
self._unix_socket = unix_socket
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self._unix_socket)
sock.settimeout(self.socket_timeout)
self.sock = sock
def __call__(self, *args, **kwargs):
httplib.HTTPConnection.__init__(self, *args, **kwargs)
return self
class UnixSocketHandler(urllib2.AbstractHTTPHandler):
"""Class that makes Unix sockets work with urllib2 without any additional
dependencies."""
def unix_open(self, req):
full_path = "%s%s" % urlsplit(req.get_full_url())[1:3]
path = os.path.sep
for part in full_path.split("/"):
path = os.path.join(path, part)
if not os.path.exists(path):
break
unix_socket = path
# add a host or else urllib2 complains
url = req.get_full_url().replace(unix_socket, "/localhost")
new_req = urllib2.Request(url, req.get_data(), dict(req.header_items()))
new_req.timeout = req.timeout
return self.do_open(UnixHTTPConnection(unix_socket), new_req)
unix_request = urllib2.AbstractHTTPHandler.do_request_
class Docker(AgentCheck):
"""Collect metrics and events from Docker API and cgroups"""
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# Initialize a HTTP opener with Unix socket support
socket_timeout = int(init_config.get('socket_timeout', 0)) or DEFAULT_SOCKET_TIMEOUT
UnixHTTPConnection.socket_timeout = socket_timeout
self.url_opener = urllib2.build_opener(UnixSocketHandler())
# Locate cgroups directories
self._mountpoints = {}
self._cgroup_filename_pattern = None
docker_root = init_config.get('docker_root', '/')
for metric in CGROUP_METRICS:
self._mountpoints[metric["cgroup"]] = self._find_cgroup(metric["cgroup"], docker_root)
self._last_event_collection_ts = defaultdict(lambda: None)
def check(self, instance):
# Report image metrics
if _is_affirmative(instance.get('collect_images_stats', True)):
self._count_images(instance)
# Get the list of containers and the index of their names
containers, ids_to_names = self._get_and_count_containers(instance)
# Report container metrics from cgroups
skipped_container_ids = self._report_containers_metrics(containers, instance)
# Send events from Docker API
if _is_affirmative(instance.get('collect_events', True)):
self._process_events(instance, ids_to_names, skipped_container_ids)
# Containers
def _count_images(self, instance):
# It's not an important metric, keep going if it fails
try:
tags = instance.get("tags", [])
active_images = len(self._get_images(instance, get_all=False))
all_images = len(self._get_images(instance, get_all=True))
self.gauge("docker.images.available", active_images, tags=tags)
self.gauge("docker.images.intermediate", (all_images - active_images), tags=tags)
except Exception, e:
self.warning("Failed to count Docker images. Exception: {0}".format(e))
def _get_and_count_containers(self, instance):
tags = instance.get("tags", [])
with_size = _is_affirmative(instance.get('collect_container_size', False))
service_check_name = 'docker.service_up'
try:
running_containers = self._get_containers(instance, with_size=with_size)
all_containers = self._get_containers(instance, get_all=True)
except (socket.timeout, urllib2.URLError), e:
self.service_check(service_check_name, AgentCheck.CRITICAL,
message="Unable to list Docker containers: {0}".format(e), tags=tags)
raise Exception("Failed to collect the list of containers. Exception: {0}".format(e))
self.service_check(service_check_name, AgentCheck.OK, tags=tags)
running_containers_ids = set([container['Id'] for container in running_containers])
for container in all_containers:
container_tags = list(tags)
for key in DOCKER_TAGS:
tag = self._make_tag(key, container[key], instance)
if tag:
container_tags.append(tag)
if container['Id'] in running_containers_ids:
self.set("docker.containers.running", container['Id'], tags=container_tags)
else:
self.set("docker.containers.stopped", container['Id'], tags=container_tags)
# The index of the names is used to generate and format events
ids_to_names = {}
for container in all_containers:
            ids_to_names[container['Id']] = container['Names'][0].lstrip("/")
return running_containers, ids_to_names
def _prepare_filters(self, instance):
# The reasoning is to check exclude first, so we can skip if there is no exclude
if not instance.get("exclude"):
return False
# Compile regex
|
instance["exclude_patterns"] = [re.compile(rule) for rule in instance.get("exclude", [])]
instance["include_patterns"] = [re.compile(rule) for rule in instance.get("include", [])]
return True
def _is_container_excluded(self, instance, tags):
if self._tags_match_patterns(tags, instance.get("exclude_patterns")):
if self._tags_match_patterns(tags, instance.get("include_patterns")):
return False
return True
return False
def _tags_match_patterns(self, tags, filters):
for rule in filters:
for tag in tags:
if re.match(rule, tag):
return True
return False
def _report_containers_metrics(self, containers, instance):
skipped_container_ids = []
collect_uncommon_metrics = _is_affirmative(instance.get("collect_all_metrics", False))
|
ScreamingUdder/mantid
|
Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/TransformToIqt.py
|
Python
|
gpl-3.0
| 13,257
| 0.002263
|
# pylint: disable=no-init,too-many-instance-attributes
from __future__ import (absolute_import, division, print_function)
from mantid.simpleapi import *
from mantid.api import (PythonAlgorithm, AlgorithmFactory, MatrixWorkspaceProperty,
ITableWorkspaceProperty, PropertyMode, Progress)
from mantid.kernel import Direction, logger
import math
class TransformToIqt(PythonAlgorithm):
_sample = None
_resolution = None
_e_min = None
_e_max = None
_e_width = None
_number_points_per_bin = None
_parameter_table = None
_output_workspace = None
_dry_run = None
def category(self):
return "Workflow\\Inelastic;Workflow\\MIDAS"
def summary(self):
return 'Transforms an inelastic reduction to I(Q, t)'
def PyInit(self):
self.declareProperty(MatrixWorkspaceProperty('SampleWorkspace', '',
optional=PropertyMode.Mandatory,
direction=Direction.Input),
doc="Name for the sample workspace.")
self.declareProperty(MatrixWorkspaceProperty('ResolutionWorkspace', '',
optional=PropertyMode.Mandatory,
direction=Direction.Input),
doc="Name for the resolution workspace.")
self.declareProperty(name='EnergyMin', defaultValue=-0.5,
                             doc='Minimum energy for fit. Default=-0.5')
self.declareProperty(name='EnergyMax', defaultValue=0.5,
doc='Maximum energy for fit. Default=0.5')
self.declareProperty(name='BinReductionFactor', defaultValue=10.0,
                             doc='Decrease total number of spectrum points by this ratio through merging of '
                                 'intensities from neighbouring bins. Default=10')
self.declareProperty(ITableWorkspaceProperty('ParameterWorkspace', '',
direction=Direction.Output,
optional=PropertyMode.Optional),
doc='Table workspace for saving TransformToIqt properties')
self.declareProperty(MatrixWorkspaceProperty('OutputWorkspace', '',
direction=Direction.Output,
optional=PropertyMode.Optional),
doc='Output workspace')
self.declareProperty(name='DryRun', defaultValue=False,
doc='Only calculate and output the parameters')
def PyExec(self):
self._setup()
self._calculate_parameters()
if not self._dry_run:
self._transform()
self._add_logs()
else:
skip_prog = Progress(self, start=0.3, end=1.0, nreports=2)
skip_prog.report('skipping transform')
skip_prog.report('skipping add logs')
logger.information('Dry run, will not run TransformToIqt')
self.setProperty('ParameterWorkspace', self._parameter_table)
self.setProperty('OutputWorkspace', self._output_workspace)
def _setup(self):
"""
Gets algorithm properties.
"""
from IndirectCommon import getWSprefix
self._sample = self.getPropertyValue('SampleWorkspace')
self._resolution = self.getPropertyValue('ResolutionWorkspace')
self._e_min = self.getProperty('EnergyMin').value
self._e_max = self.getProperty('EnergyMax').value
self._number_points_per_bin = self.getProperty('BinReductionFactor').value
self._parameter_table = self.getPropertyValue('ParameterWorkspace')
if self._parameter_table == '':
self._parameter_table = getWSprefix(self._sample) + 'TransformToIqtParameters'
self._output_workspace = self.getPropertyValue('OutputWorkspace')
if self._output_workspace == '':
self._output_workspace = getWSprefix(self._sample) + 'iqt'
self._dry_run = self.getProperty('DryRun').value
def validateInputs(self):
"""
Validate input properties.
"""
issues = dict()
e_min = self.getProperty('EnergyMin').value
e_max = self.getProperty('EnergyMax').value
# Check for swapped energy values
if e_min > e_max:
energy_swapped = 'EnergyMin is greater than EnergyMax'
issues['EnergyMin'] = energy_swapped
issues['EnergyMax'] = energy_swapped
return issues
def _calculate_parameters(self):
"""
Calculates the TransformToIqt parameters and saves in a table workspace.
"""
workflow_prog = Progress(self, start=0.0, end=0.3, nreports=8)
        workflow_prog.report('Cropping Workspace')
CropWorkspace(InputWorkspace=self._sample,
OutputWorkspace='__TransformToIqt_sample_cropped',
Xmin=self._e_min,
Xmax=self._e_max)
workflow_prog.report('Calculating table properties')
x_data = mtd['__TransformToIqt_sample_cropped'].readX(0)
number_input_points = len(x_data) - 1
num_bins = int(number_input_points / self._number_points_per_bin)
self._e_width = (abs(self._e_min) + abs(self._e_max)) / num_bins
        workflow_prog.report('Attempting to Access IPF')
try:
workflow_prog.report('Access IPF')
instrument = mtd[self._sample].getInstrument()
analyserName = instrument.getStringParameter('analyser')[0]
analyser = instrument.getComponentByName(analyserName)
if analyser is not None:
logger.debug('Found %s component in instrument %s, will look for resolution there'
% (analyserName, instrument))
resolution = analyser.getNumberParameter('resolution')[0]
else:
logger.debug('No %s component found on instrument %s, will look for resolution in top level instrument'
% (analyserName, instrument))
resolution = instrument.getNumberParameter('resolution')[0]
logger.information('Got resolution from IPF: %f' % resolution)
workflow_prog.report('IPF resolution obtained')
except (AttributeError, IndexError):
workflow_prog.report('Resorting to Default')
resolution = 0.0175
logger.warning('Could not get resolution from IPF, using default value: %f' % (resolution))
resolution_bins = int(round((2 * resolution) / self._e_width))
if resolution_bins < 5:
logger.warning('Resolution curve has <5 points. Results may be unreliable.')
workflow_prog.report('Creating Parameter table')
param_table = CreateEmptyTableWorkspace(OutputWorkspace=self._parameter_table)
workflow_prog.report('Populating Parameter table')
param_table.addColumn('int', 'SampleInputBins')
param_table.addColumn('float', 'BinReductionFactor')
param_table.addColumn('int', 'SampleOutputBins')
param_table.addColumn('float', 'EnergyMin')
param_table.addColumn('float', 'EnergyMax')
param_table.addColumn('float', 'EnergyWidth')
param_table.addColumn('float', 'Resolution')
param_table.addColumn('int', 'ResolutionBins')
param_table.addRow([number_input_points, self._number_points_per_bin, num_bins,
self._e_min, self._e_max, self._e_width,
resolution, resolution_bins])
workflow_prog.report('Deleting temp Workspace')
DeleteWorkspace('__TransformToIqt_sample_cropped')
self.setProperty('ParameterWorkspace', param_table)
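    # Worked example of the arithmetic above (numbers purely illustrative):
    # 1000 input points with BinReductionFactor = 10 gives num_bins = 100;
    # for EnergyMin/EnergyMax of -0.5/0.5 the output bin width is
    # (0.5 + 0.5) / 100 = 0.01, and a resolution of 0.0175 then spans
    # int(round(2 * 0.0175 / 0.01)) = 4 bins, few enough to trigger the
    # "<5 points" warning issued in _calculate_parameters.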
def _add_logs(self):
sample_logs = [('iqt_sample_workspace', self._sample),
('iqt_resolution_workspace', self._resolution),
('iqt_binning', '%f,%f,%f'
|
henrytao-me/openerp.positionq
|
addons/positionq/pq_salary/pq_thang_luong.py
|
Python
|
agpl-3.0
| 1,548
| 0.007175
|
# -*- coding: utf-8 -*-
from openerp.osv import osv, fields
from openerp.tools.translate import _
import logging
from datetime import datetime
from openerp.osv.fields import datetime as datetime_field
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT
from unidecode import unidecode
import types
class pq_thang_luong(osv.osv):
_name = 'pq.thang.luong'
_description = 'Thang Luong'
_columns = {
'name': fields.char('Tên', size=128, required=True),
'ty_le': fields.float('Tỷ lệ', digits=(16,2)),
'create_date': fields.datetime('Ngày giờ tạo', readonly=True),
'user_id': fields.many2one('res.users', string="Người tạo", readonly=True),
}
_defaults = {
        'ty_le': lambda *x: 1,
        'user_id': lambda self, cr, uid, context = None: uid,
}
_sql_constraints = [
]
def create(self, cr, uid, vals, context=None):
self.pool.get('pq.redis').clear_all(cr, uid)
return super(pq_thang_luong, self).create(cr, uid, vals, context)
def write(self, cr, uid, ids, vals, context=None):
self.pool.get('pq.redis').clear_all(cr, uid)
return super(pq_thang_luong, self).write(cr, uid, ids, vals, context)
def unlink(self, cr, uid, ids, context=None):
self.pool.get('pq.redis').clear_all(cr, uid)
return super(pq_thang_luong, self).unlink(cr, uid, ids, context)
pq_thang_luong()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
christianurich/VIBe2UrbanSim
|
3rdparty/opus/src/opus_core/datasets/interaction_dataset.py
|
Python
|
gpl-2.0
| 33,528
| 0.007904
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.resources import Resources
from opus_core.misc import take_choices, do_id_mapping_dict_from_array
from opus_core.misc import DebugPrinter, get_distinct_list, unique
from opus_core.datasets.dataset import Dataset
from opus_core.variables.variable_factory import VariableFactory
from opus_core.variables.attribute_type import AttributeType
from opus_core.variables.variable import get_dependency_datasets
from opus_core.storage_factory import StorageFactory
from opus_core.logger import logger
from numpy import array, repeat, ndarray, reshape
from numpy import indices, zeros, float32, asarray, arange
from numpy import logical_not, where, ones, take, resize
from opus_core.variables.variable_name import VariableName
from numpy import ma
from gc import collect
class InteractionDataset(Dataset):
"""Class serves as a holder of interaction variables."""
def __init__(self, resources=None, dataset1=None, dataset2=None, index1=None, index2=None, dataset_name=None,
debug=None):
""" Argument 'resources' is of type Resources. It is merged with arguments. It should contain:
dataset1 - agent class
dataset2 - class of the choice dataset
Optional:
index1 - 1D array, indices of dataset1
index2 - If 2D array: row i contains indices of individuals of dataset2 that belong to
i-th individual of dataset1[index1].
If 1D array: indices of individuals of dataset2 for all individuals of dataset1[index1].
dataset_name - subdirectory in which implementation of the interaction variables is placed (default "")
dataset1.resources and dataset2.resources should contain key 'dataset_name' (see Dataset.get_dataset_name()).
"""
self.resources = Resources(resources)
self.resources.merge_if_not_None({
"dataset1":dataset1, "dataset2":dataset2,
"index1":index1, "index2":index2,
"dataset_name":dataset_name, "debug":debug})
        self.attribute_boxes = {}
self.attribute_names = []
self.debug = self.resources.get("debug", 0)
if not isinstance(self.debug, DebugPrinter):
self.debug = DebugPrinter(self.debug)
self.resources.check_obligatory_keys(["dataset1", "dataset2"])
self.dataset1 = self.resources["dataset1"]
self.dataset2 = self.resources["dataset2"]
self.index1 = self.resources.get("index1", None)
        self.index2 = self.resources.get("index2", None)
self.dataset_name = self.resources.get("dataset_name", None)
        if self.dataset_name is None:
self.dataset_name = self.dataset1.get_dataset_name() + '_x_' + self.dataset2.get_dataset_name()
self._primary_attribute_names=[]
self.index1_mapping = {}
        if self.index1 is not None:
self.index1_mapping = do_id_mapping_dict_from_array(self.index1)
self._id_names = None # for compatibility with Dataset
self.variable_factory = VariableFactory()
self._aliases = {} # for compatibility with Dataset
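    # Shape illustration (values made up, not taken from a real model run):
    # with index1 = [0, 2] selecting two agents from dataset1, a 2D
    # index2 = [[5, 7], [1, 4]] lists, row by row, the dataset2 members that
    # form each selected agent's choice set, so interaction attributes come
    # out as 2 x 2 arrays indexed (agent, alternative).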
def get_attribute(self, name):
""" Return an array of the (by the argument name) given attribute. """
if not isinstance(name, VariableName):
attr_name = VariableName(name)
else:
attr_name = name
alias = attr_name.get_alias()
dataset_name = attr_name.get_dataset_name()
if not (alias in self.get_attribute_names()):
if dataset_name == self.get_dataset(1).dataset_name:
index = self.get_2d_index_of_dataset1()
return self.get_dataset(1).get_attribute_by_index(attr_name, index)
if dataset_name == self.get_dataset(2).dataset_name:
index = self.get_2d_index()
return self.get_dataset(2).get_attribute_by_index(attr_name, index)
if alias in self.get_dataset(1).get_known_attribute_names():
index = self.get_2d_index_of_dataset1()
return self.get_dataset(1).get_attribute_by_index(attr_name, index)
if alias in self.get_dataset(2).get_known_attribute_names():
index = self.get_2d_index()
return self.get_dataset(2).get_attribute_by_index(attr_name, index)
self._raise_error(NameError, "Variable %s not found!" % alias)
return self.attribute_boxes[alias].get_data()
def get_attribute_of_dataset(self, name, dataset_number=1):
""" Return values of attribute given by 'name' belonging to the given dataset,
            possibly filtered by the corresponding index. It is a 1d array of size
reduced_n or reduced_m.
"""
index = self.get_index(dataset_number)
        if index is not None:
return self.get_dataset(dataset_number).get_attribute_by_index(name, index)
return self.get_dataset(dataset_number).get_attribute(name)
def get_id_attribute_of_dataset(self, dataset_number=1):
"""Like 'get_attribute_of_dataset' where name is the id_name of the given dataset.
"""
index = self.get_index(dataset_number)
        if index is not None:
return self.get_dataset(dataset_number).get_id_attribute()[index]
return self.get_dataset(dataset_number).get_id_attribute()
def _compute_if_needed(self, name, dataset_pool, resources=None, quiet=False, version=None):
""" Compute variable given by the argument 'name' only if this variable
has not been computed before.
Check first if this variable belongs to dataset1 or dataset2.
dataset_pool holds available datasets.
"""
if not isinstance(name, VariableName):
variable_name = VariableName(name)
else:
variable_name = name
short_name = variable_name.get_alias()
if (short_name in self.get_attribute_names()) and (self.are_dependent_variables_up_to_date(
variable_name, version=version)):
return version #nothing to be done
dataset_name = variable_name.get_dataset_name()
if dataset_name == self.get_dataset_name():
new_version = self._compute_one_variable(variable_name, dataset_pool, resources)
else:
owner_dataset, index = self.get_owner_dataset_and_index(dataset_name)
if owner_dataset is None:
self._raise_error(StandardError, "Cannot find variable '%s'\nin either dataset or in the interaction set." %
variable_name.get_expression())
owner_dataset.compute_variables([variable_name], dataset_pool, resources=resources, quiet=True)
new_version = self.add_attribute(data = owner_dataset.get_attribute_by_index(variable_name, index),
name = variable_name, metadata = AttributeType.COMPUTED)
attribute_box = owner_dataset._get_attribute_box(variable_name)
variable = attribute_box.get_variable_instance()
my_attribute_box = self._get_attribute_box(variable_name)
my_attribute_box.set_variable_instance(variable)
return new_version
def get_owner_dataset_and_index(self, dataset_name):
if dataset_name == self.dataset1.get_dataset_name():
return (self.dataset1, self.get_2d_index_of_dataset1())
elif dataset_name == self.dataset2.get_dataset_name():
return (self.dataset2, self.get_2d_index())
return (None, None)
def are_dependent_variables_up_to_date(self, variable_name, version):
""" Return True if the version of this variable correspond to versions of all
dependent variables, otherwise False. That is, if any of the dependent variable
must be recomput
|
jucapoco/baseSiteGanttChart
|
jcvrbaseapp/apps.py
|
Python
|
mit
| 97
| 0
|
from django.apps import AppConfig
class JcvrbaseappConfig(AppConfig):
    name = 'jcvrbaseapp'
|
flyingSprite/spinelle
|
common/utility/image_downloader.py
|
Python
|
mit
| 707
| 0.004243
|
from common.utility.utils import FileUtils
default_resource_path = '/Users/Fernando/Develop/downloader'
def get_image(image_hash):
"""
Download huaban image by image hash code.
Such as get_image('3058ff7398b8b725f436c6c7d56f60447468034d2347b-fGd8hd')
:param image_hash: Image hash code.
:return: None
"""
    # Download normal auto size image.
url_normal = f'http://img.hb.aicdn.com/{image_hash}'
    FileUtils.save_file(url_normal, f'{default_resource_path}/normal/{image_hash}.jpg')
    # Download 236px width size image.
url_fw236 = f'http://img.hb.aicdn.com/{image_hash}_fw236'
FileUtils.save_file(url_fw236, f'{default_resource_path}/fw236/{image_hash}.jpg')
|
saltzm/yadi
|
tests/test_yadi.py
|
Python
|
bsd-3-clause
| 367
| 0.00545
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_yadi
----------------------------------
Tests for `yadi` module.
"""
import unittest
from yadi import yadi
class TestYadi(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
    unittest.main()
|
wasade/qiime
|
qiime/truncate_reverse_primer.py
|
Python
|
gpl-2.0
| 7,049
| 0.000284
|
#!/usr/bin/env python
# File created February 29, 2012
from __future__ import division
__author__ = "William Walters"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["William Walters", "Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "William Walters"
__email__ = "William.A.Walters@colorado.edu"
from os.path import join, basename
from skbio.parse.sequences import parse_fasta
from skbio.sequence import DNA
from qiime.split_libraries import local_align_primer_seq
from qiime.check_id_map import process_id_map
def get_rev_primer_seqs(mapping_fp):
""" Parses mapping file to get dictionary of SampleID:Rev primer
mapping_fp: mapping filepath
"""
hds, mapping_data, run_description, errors, warnings = \
process_id_map(mapping_fp, has_barcodes=False,
disable_primer_check=True)
if errors:
for curr_err in errors:
if curr_err.startswith("Duplicate SampleID"):
raise ValueError('Errors were found with mapping file, ' +
'please run validate_mapping_file.py to ' +
'identify problems.')
# create dict of dicts with SampleID:{each header:mapping data}
id_map = {}
for curr_data in mapping_data:
id_map[curr_data[0]] = {}
for header in range(len(hds)):
for curr_data in mapping_data:
id_map[curr_data[0]][hds[header]] = curr_data[header]
reverse_primers = {}
for curr_id in id_map.keys():
try:
reverse_primers[curr_id] =\
[str(DNA(curr_rev_primer).rc()) for curr_rev_primer in
id_map[curr_id]['ReversePrimer'].split(',')]
except KeyError:
raise KeyError("Reverse primer not found in mapping file, " +
"please include a 'ReversePrimer' column.")
# Check for valid reverse primers
# Will have been detected as warnings from mapping file
for curr_err in errors:
if curr_err.startswith("Invalid DNA sequence detected"):
raise ValueError("Problems found with reverse primers, please " +
"check mapping file with validate_mapping_file.py")
return reverse_primers
def get_output_filepaths(output_dir,
fasta_fp):
""" Returns output fasta filepath and log filepath
fasta_fp: fasta filepath
output_dir: output directory
"""
fasta_extensions = ['.fa', '.fasta', '.fna']
curr_fasta_out = basename(fasta_fp)
for fasta_extension in fasta_extensions:
curr_fasta_out = curr_fasta_out.replace(fasta_extension, '')
curr_fasta_out += "_rev_primer_truncated.fna"
output_fp = join(output_dir, curr_fasta_out)
log_fp = join(output_dir, "rev_primer_truncation.log")
return output_fp, log_fp
def truncate_rev_primers(fasta_f,
output_fp,
reverse_primers,
truncate_option='truncate_only',
primer_mismatches=2):
""" Locally aligns reverse primers, trucates or removes seqs
fasta_f: open file of fasta file
output_fp: open filepath to write truncated fasta to
reverse_primers: dictionary of SampleID:reverse primer sequence
truncate_option: either truncate_only, truncate_remove
primer_mismatches: number of allowed primer mismatches
"""
log_data = {
'sample_id_not_found': 0,
'reverse_primer_not_found': 0,
'total_seqs': 0,
'seqs_written': 0
}
for label, seq in parse_fasta(fasta_f):
curr_label = label.split('_')[0]
log_data['total_seqs'] += 1
# Check fasta label for valid SampleID, if not found, just write seq
try:
curr_rev_primer = reverse_primers[curr_label]
except KeyError:
log_data['sample_id_not_found'] += 1
output_fp.write('>%s\n%s\n' % (label, seq))
log_data['seqs_written'] += 1
continue
mm_tests = {}
for rev_primer in curr_rev_primer:
            rev_primer_mm, rev_primer_index =\
local_align_primer_seq(rev_primer, seq)
mm_tests[rev_primer_mm] = rev_primer_index
rev_primer_mm = min(mm_tests.keys())
rev_primer_index = mm_tests[rev_primer_mm]
if rev_primer_mm > primer_mismatches:
if truncate_option == "truncate_remove":
log_data['reverse_primer_not_found'] += 1
else:
log_data['reverse_primer_not_found'] += 1
log_data['seqs_written'] += 1
output_fp.write('>%s\n%s\n' % (label, seq))
else:
# Check for zero seq length after truncation, will not write seq
if rev_primer_index > 0:
log_data['seqs_written'] += 1
output_fp.write('>%s\n%s\n' % (label, seq[0:rev_primer_index]))
return log_data
def write_log_file(log_data,
log_f):
""" Writes log file
log_data: dictionary of details about reverse primer removal
log_f: open filepath to write log details
"""
log_f.write("Details for removal of reverse primers\n")
log_f.write("Original fasta filepath: %s\n" % log_data['fasta_fp'])
log_f.write("Total seqs in fasta: %d\n" % log_data['total_seqs'])
log_f.write("Mapping filepath: %s\n" % log_data['mapping_fp'])
log_f.write("Truncation option: %s\n" % log_data['truncate_option'])
log_f.write("Mismatches allowed: %d\n" % log_data['primer_mismatches'])
log_f.write("Total seqs written: %d\n" % log_data['seqs_written'])
log_f.write("SampleIDs not found: %d\n" % log_data['sample_id_not_found'])
log_f.write("Reverse primers not found: %d\n" %
log_data['reverse_primer_not_found'])
def truncate_reverse_primer(fasta_fp,
mapping_fp,
output_dir=".",
truncate_option='truncate_only',
primer_mismatches=2):
""" Main program function for finding, removing reverse primer seqs
fasta_fp: fasta filepath
mapping_fp: mapping filepath
output_dir: output directory
truncate_option: truncation option, either truncate_only, truncate_remove
    primer_mismatches: Number of mismatches allowed in reverse primer"""
reverse_primers = get_rev_primer_seqs(open(mapping_fp, "U"))
output_fp, log_fp = get_output_filepaths(output_dir, fasta_fp)
log_data = truncate_rev_primers(open(fasta_fp, "U"),
                                    open(output_fp, "w"), reverse_primers, truncate_option,
primer_mismatches)
log_data['fasta_fp'] = fasta_fp
log_data['mapping_fp'] = mapping_fp
log_data['truncate_option'] = truncate_option
log_data['primer_mismatches'] = primer_mismatches
write_log_file(log_data, open(log_fp, "w"))
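# Minimal invocation sketch (the file names below are hypothetical examples,
# not part of this module; the mapping file must provide a 'ReversePrimer'
# column as required by get_rev_primer_seqs above).
if __name__ == '__main__':
    truncate_reverse_primer('seqs.fna', 'mapping.txt', output_dir='.',
                            truncate_option='truncate_only',
                            primer_mismatches=2)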
|
valtech-mooc/edx-platform
|
lms/djangoapps/bulk_email/tests/test_course_optout.py
|
Python
|
agpl-3.0
| 4,696
| 0.003012
|
# -*- coding: utf-8 -*-
"""
Unit tests for student optouts from course email
"""
import json
from mock import patch, Mock
from django.core import mail
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.conf import settings
from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory
from student.models import CourseEnrollment
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message'))
class TestOptoutCourseEmails(ModuleStoreTestCase):
"""
Test that optouts are referenced in sending course email.
"""
def setUp(self):
super(TestOptoutCourseEmails, self).setUp()
course_title = u"ẗëṡẗ title イ乇丂イ ᄊ乇丂丂ムg乇 キo尺 ムレレ тэѕт мэѕѕаБэ"
self.course = CourseFactory.create(display_name=course_title)
        self.instructor = AdminFactory.create()
self.student = UserFactory.create()
CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
# load initial content (since we don't run migrations as part of tests):
call_command("loaddata", "course_email_template.json")
self.client.login(username=self.student.username, password="test")
        self.send_mail_url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.success_content = {
'course_id': self.course.id.to_deprecated_string(),
'success': True,
}
def navigate_to_email_view(self):
"""Navigate to the instructor dash's email view"""
# Pull up email view on instructor dashboard
url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.get(url)
email_section = '<div class="vert-left send-email" id="section-send-email">'
# If this fails, it is likely because ENABLE_INSTRUCTOR_EMAIL is set to False
self.assertTrue(email_section in response.content)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
def test_optout_course(self):
"""
Make sure student does not receive course email after opting out.
"""
url = reverse('change_email_settings')
# This is a checkbox, so on the post of opting out (that is, an Un-check of the box),
# the Post that is sent will not contain 'receive_emails'
response = self.client.post(url, {'course_id': self.course.id.to_deprecated_string()})
self.assertEquals(json.loads(response.content), {'success': True})
self.client.logout()
self.client.login(username=self.instructor.username, password="test")
self.navigate_to_email_view()
test_email = {
'action': 'Send email',
'send_to': 'all',
'subject': 'test subject for all',
'message': 'test message for all'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
# Assert that self.student.email not in mail.to, outbox should be empty
self.assertEqual(len(mail.outbox), 0)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
def test_optin_course(self):
"""
Make sure student receives course email after opting in.
"""
url = reverse('change_email_settings')
response = self.client.post(url, {'course_id': self.course.id.to_deprecated_string(), 'receive_emails': 'on'})
self.assertEquals(json.loads(response.content), {'success': True})
self.client.logout()
self.assertTrue(CourseEnrollment.is_enrolled(self.student, self.course.id))
self.client.login(username=self.instructor.username, password="test")
self.navigate_to_email_view()
test_email = {
'action': 'Send email',
'send_to': 'all',
'subject': 'test subject for all',
'message': 'test message for all'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
# Assert that self.student.email in mail.to
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(len(mail.outbox[0].to), 1)
self.assertEquals(mail.outbox[0].to[0], self.student.email)
|
praw-dev/praw
|
praw/models/trophy.py
|
Python
|
bsd-2-clause
| 1,987
| 0.001007
|
"""Represent the :class:`.Trophy` class."""
from typing import TYPE_CHECKING, Any, Dict, Union
from .base import PRAWBase
if TYPE_CHECKING: # pragma: no cover
import praw
class Trophy(PRAWBase):
"""Represent a trophy.
End users should not instantiate this class directly. :meth:`.Redditor.trophies` can
be used to get a list of the redditor's trophies.
.. include:: ../../typical_attributes.rst
=============== ===================================================
Attribute Description
=============== ===================================================
``award_id`` The ID of the trophy (sometimes ``None``).
    ``description`` The description of the trophy (sometimes ``None``).
``icon_40`` The URL of a 41x41 px icon for the trophy.
``icon_70`` The URL of a 71x71 px icon for the trophy.
``name`` The name of the trophy.
``url`` A relevant URL (sometimes ``None``).
=============== ===================================================
"""
def __init__(self, reddit: "praw.Reddit", _data: Dict[str, Any]):
"""Initialize a :class:`.Trophy` instance.
:param reddit: An instance of :class:`.Reddit`.
:param _data: The structured data, assumed to be a dict and key ``"name"`` must
be provided.
"""
assert isinstance(_data, dict) and "name" in _data
super().__init__(reddit, _data=_data)
def __eq__(self, other: Union["Trophy", Any]) -> bool:
"""Check if two Trophies are equal."""
if isinstance(other, self.__class__):
return self.name == other.name
return super().__eq__(other)
def __str__(self) -> str:
"""Return a name of the trophy."""
return self.name # pylint: disable=no-member
def __repr__(self) -> str:
"""Return an object initialization representation of the instance."""
return f"{self.__class__.__name__}(name={self.name!r})"
|
PepperPD/edx-pepper-platform
|
lms/djangoapps/instructor_task/tasks_helper.py
|
Python
|
agpl-3.0
| 18,210
| 0.005327
|
"""
This file contains tasks that are designed to perform background operations on the
running state of a course.
"""
import json
from time import time
from sys import exc_info
from traceback import format_exc
from celery import current_task
from celery.utils.log import get_task_logger
from celery.signals import worker_process_init
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.db import transaction
from dogapi import dog_stats_api
from xmodule.modulestore.django import modulestore
from track.views import task_track
from courseware.models import StudentModule
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor_internal
from instructor_task.models import InstructorTask, PROGRESS
# define different loggers for use within tasks and on client side
TASK_LOG = get_task_logger(__name__)
# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'
class UpdateProblemModuleStateError(Exception):
"""
Error signaling a fatal condition while updating problem modules.
Used when the current module cannot be processed and no more
modules should be attempted.
"""
pass
def _get_current_task():
"""Stub to make it easier to test without actually running Celery"""
return current_task
def _perform_module_state_update(course_id, module_state_key, student_identifier, update_fcn, action_name, filter_fcn,
xmodule_instance_args):
"""
Performs generic update by visiting StudentModule instances with the update_fcn provided.
StudentModule instances are those that match the specified `course_id` and `module_state_key`.
If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem.
If a `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one
argument, which is the query being filtered, and returns the filtered version of the query.
The `update_fcn` is called on each StudentModule that passes the resulting filtering.
It is passed three arguments: the module_descriptor for the module pointed to by the
module_state_key, the particular StudentModule to update, and the xmodule_instance_args being
passed through. If the value returned by the update function evaluates to a boolean True,
the update is successful; False indicates the update on the particular student module failed.
A raised exception indicates a fatal condition -- that no other student modules should be considered.
The return value is a dict containing the task's results, with the following keys:
'attempted': number of attempts made
'updated': number of attempts that "succeeded"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
Because this is run internal to a task, it does not catch exceptions. These are allowed to pass up to the
next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
result object.
"""
# get start time for task:
start_time = time()
# find the problem descriptor:
module_descriptor = modulestore().get_instance(course_id, module_state_key)
# find the module in question
modules_to_update = StudentModule.objects.filter(course_id=course_id,
module_state_key=module_state_key)
# give the option of rescoring an individual student. If not specified,
# then rescores all students who have responded to a problem so far
student = None
if student_identifier is not None:
# if an identifier is supplied, then look for the student,
# and let it throw an exception if none is found.
if "@" in student_identifier:
student = User.objects.get(email=student_identifier)
elif student_identifier is not None:
student = User.objects.get(username=student_identifier)
if student is not None:
modules_to_update = modules_to_update.filter(student_id=student.id)
if filter_fcn is not None:
modules_to_update = filter_fcn(modules_to_update)
# perform the main loop
num_updated = 0
num_attempted = 0
num_total = modules_to_update.count()
def get_task_progress():
"""Return a dict containing info about current task"""
current_time = time()
progress = {'action_name': action_name,
'attempted': num_attempted,
'updated': num_updated,
'total': num_total,
'duration_ms': int((current_time - start_time) * 1000),
}
return progress
task_progress = get_task_progress()
_get_current_task().update_state(state=PROGRESS, meta=task_progress)
for module_to_update in modules_to_update:
num_attempted += 1
# There is no try here: if there's an error, we let it throw, and the task will
# be marked as FAILED, with a stack trace.
with dog_stats_api.timer('instructor_tasks.module.time.step', tags=['action:{name}'.format(name=action_name)]):
if update_fcn(module_descriptor, module_to_update, xmodule_instance_args):
# If the update_fcn returns true, then it performed some kind of work.
# Logging of failures is left to the update_fcn itself.
num_updated += 1
# update task status:
task_progress = get_task_progress()
_get_current_task().update_state(state=PROGRESS, meta=task_progress)
return task_progress
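# Illustrative only (not part of this module): the smallest possible update_fcn
# matching the three-argument signature described in the docstring above. Real
# callers pass functions such as a rescore or reset-attempts routine.
def _example_noop_update(module_descriptor, student_module, xmodule_instance_args):
    """Return True so the StudentModule counts as updated without changing state."""
    return True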
def update_problem_module_state(entry_id, update_fcn, action_name, filter_fcn,
xmodule_instance_args):
"""
Performs generic update by visiting StudentModule instances with the update_fcn provided.
The `entry_id` is the primary key for the InstructorTask entry representing the task. This function
updates the entry on success and failure of the _perform_module_state_update function it
wraps. It is setting the entry's value for task_state based on what Celery would set it to once
the task returns to Celery: FAILURE if an exception is encountered, and SUCCESS if it returns normally.
Other arguments are pass-throughs to _perform_module_state_update, and documented there.
If no exceptions are raised, a dict containing the task's result is returned, with the following keys:
'attempted': number of attempts made
'updated': number of attempts that "succeeded"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
Before returning, this is also JSON-serialized and stored in the task_output column of the InstructorTask entry.
If an exception is raised internally, it is caught and recorded in the InstructorTask entry.
    This is also a JSON-serialized dict, stored in the task_output column, containing the following keys:
'exception': type of exception object
'message': error message from exception object
        'traceback': traceback information (truncated if necessary)
Once the exception is caught, it is raised again and allowed to pass up to the
task-running level, so that it can also set the failure modes and capture the error trace in the
result object that Celery creates.
"""
# get the InstructorTask to be updated. If this fails, then let the exception return to Celery.
# There's no point in catching it here.
entry = InstructorTask.objects.get
|
Manewing/pyAmbient
|
pyambient.py
|
Python
|
mit
| 7,575
| 0.00462
|
#!/usr/bin/python
from pygame import mixer
from threading import Timer
from random import randint
from xml.etree import ElementTree as XmlEt
import argparse
from utils import LOGGER
from sounds import SoundPool
# @brief constrain - constrains x to interval [mi, ma]
def constrain(x, mi, ma):
return min(ma, max(mi, x))
# @class AmbientSound
# @brief wrapper class around class sounds.Sound, handles updates
# of volume and stores ambient sound configuration
class AmbientSound(object):
# @brief constructor
# @param[in] sound - the sound object to wrap around
# @param[in] base - the base volume of the sound
# @param[in] drift - the volume drift of the sound
# @param[in] rate_min - the minimal update rate of the sound
# @param[in] rate_max - the maximal update rate of the sound
def __init__(self, sound, base, drift, rate_min, rate_max):
self.sound = sound
self.base = base
self.drift = drift
self.rate_min = rate_min
self.rate_max = rate_max
self.rate = (1 - randint(0,1)*2)
# check base and drift values
if base - drift < 0.0 or base + drift > 1.0:
raise ValueError("Volume base +/- drift exceeds boundaries [0.0,1.0]")
# initialize rate
self.newRate()
# initialize sound
self.sound.setVolume(self.base)
# @brief newRate - sets new random rate with opposite sign than before
def newRate(self):
if self.rate > 0:
self.rate = -float(randint(self.rate_min, self.rate_max))
else:
self.rate = float(randint(self.rate_min, self.rate_max))
# @brief adaptVolume - adapts the sound volume by 'drift/rate'
def adaptVolume(self):
vol = self.sound.getVolume() + self.drift / self.rate
max_vol = self.base + self.drift
min_vol = self.base - self.drift
# check new volume
if vol >= max_vol or vol <= min_vol:
vol = constrain(vol, min_vol, max_vol)
self.newRate()
# set new volume
self.sound.setVolume(vol)
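    # Worked example (numbers illustrative): with base 0.5, drift 0.2 and
    # rate 8, each update moves the volume by 0.2 / 8 = 0.025; once it hits
    # 0.7 or 0.3 it is clamped by constrain() and newRate() flips the sign,
    # so the volume wanders back and forth inside [base - drift, base + drift].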
# @brief play - starts sound playing
def play(self):
self.sound.play()
# @brief stop - stops sound playing
def stop(self):
self.sound.stop()
# @class Ambient
# @brief an ambient consisting of different sound files
class Ambient(object):
# @brief constructor
# @param[in] configfile - the configuration file of the ambient
# @param[in] spool - the sound pool the ambient should use
def __init__(self, configfile, spool = SoundPool()):
# load configuration file
with open(configfile, "r") as f:
data = f.read()
root = XmlEt.fromstring(data).find("Ambient")
# set the name of the ambient
self.name = root.get("name")
LOGGER.logInfo("Ambient '{}'".format(self.name))
# set the update rate from the volatility
self.urate = 1.0 / float(root.get("volatility"))
self.urate = constrain(self.urate, 0.0, 5.0)
# flag indicating whether ambient is currently running
self.loaded = False
self.running = False
# load sounds and sound configuration
self.sounds = list()
self.spool = spool
for soundcfg in root.findall("Sound"):
sfile = soundcfg.get("file")
base = float(soundcfg.get("base"))
drift = float(soundcfg.get("drift"))
self.sounds.append((sfile, base, drift))
LOGGER.logInfo("'{}': [{}] +/- ({})".format(sfile, base, drift))
# @brief __load - loads the actual ambient, delayed until it is started
def __load(self):
sounds = list()
for soundcfg in self.sounds:
sfile, base, drift = soundcfg
# load sound from sound pool and initialize it
sound = self.spool.get(sfile)
sounds.append(AmbientSound(sound, base, drift, 4, 16))
# reset sounds, original only stored configuration
self.sounds = sounds
self.loaded = True
# @brief __update - internal update function, adapts the volumes of all
# sounds
#
# Note: If ambient is running this function schedules itself with
# period 'self.urate'
#
def __update(self):
if not self.running:
return
LOGGER.logDebug("'{}' update".format(self.name))
for sound in self.sounds:
sound.adaptVolume()
Timer(self.urate, self.__update).start()
# @brief getName - returns the configured name of the ambient
def getName(self):
return self.name
# @brief start - starts playback of ambient
def start(self):
if not self.loaded:
self.__load()
LOGGER.logInfo("'{}' start".format(self.name))
for sound in self.sounds:
sound.play()
# indicate start
self.running = True
self.__update()
# @brief stop - stops playback of ambient
def stop(self):
if not self.loaded:
return
LOGGER.logInfo("'{}' stop".format(self.name))
for sound in self.sounds:
sound.stop()
# indicate stop
self.running = False
# @class AmbientControl
# @brief Handles a set of configured ambients
class AmbientControl(object):
# @brief constructor
# @param[in] configfile - a pyAmbient configuration file
def __init__(self, configfile):
# check if mixer is already initialized
        if mixer.get_init():
raise RuntimeError("pygame.mixer already initialized, abort")
LOGGER.logDebug("initialize pygame.mixer")
# set parameters of mixer before init, TODO check values again
mixer.pre_init(44100, -16, 2, 2048)
mixer.init()
# load configuration file
with open(configfile, "r") as f:
data = f.read()
root = XmlEt.fromstring(data)
# setup ambient dictionary
self.ambients = dict()
for elem in root.findall("AmbientConfig"):
self.ambients[elem.get("id")] = Ambient(elem.get("file"))
# set current ambient to none
self.ambient = None
# @brief getAmbients - get the configured ambients
def getAmbients(self):
return self.ambients
# @brief get - get the current ambient, None if none selected
def get(self):
return self.ambient
# @brief switch - switches to ambient with given ID
# @param[in] ambient_id - ID of the ambient to switch to
def switch(self, ambient_id):
if self.ambient != None:
self.ambient.stop()
# switch to new ambient
self.ambient = self.ambients[ambient_id]
LOGGER.logInfo("Switched to ambient '{}'".format(self.ambient.getName()))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="pyAmbient")
parser.add_argument("-c", "--config", dest="config", required=True,
help="the pyAmbient configuration file to load")
parser.add_argument("-a", "--ambient", dest="ambient", required=True,
help="the ambient ID of the ambient to start")
parser.add_argument("-d", "--debug", dest="debug", required=False,
help="if to log debug information", default=False, action="store_true")
args = parser.parse_args()
if args.debug == True:
LOGGER.setLevel(0)
else:
LOGGER.setLevel(1)
ambc = AmbientControl(args.config)
ambc.switch(args.ambient)
ambc.get().start()
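# For reference, configuration shaped the way the parsing code above expects.
# The outer root tag, file names and numbers are illustrative assumptions;
# only the child elements and attributes shown are actually read.
#
# Top-level config (passed to AmbientControl):
#   <config>
#     <AmbientConfig id="forest" file="forest.xml"/>
#   </config>
#
# Per-ambient config (forest.xml, passed to Ambient):
#   <config>
#     <Ambient name="Forest" volatility="0.5">
#       <Sound file="birds.ogg" base="0.6" drift="0.2"/>
#       <Sound file="wind.ogg" base="0.4" drift="0.1"/>
#     </Ambient>
#   </config>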
|
openstack/tacker
|
tacker/vnfm/monitor_drivers/ping/ping.py
|
Python
|
apache-2.0
| 3,250
| 0.000615
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
from tacker._i18n import _
from tacker.agent.linux import utils as linux_utils
from tacker.common import log
from tacker.vnfm.monitor_drivers import abstract_driver
LOG = logging.getLogger(__name__)
OPTS = [
cfg.IntOpt('count', default=5,
help=_('Number of ICMP packets to send')),
cfg.FloatOpt('timeout', default=5,
help=_('Number of seconds to wait for a response')),
cfg.FloatOpt('interval', default=1,
                 help=_('Number of seconds to wait between packets')),
cfg.IntOpt('retry', default=1,
help=_('Number of ping retries'))
]
cfg.CONF.register_opts(OPTS, 'monitor_ping')
def config_opts():
return [('monitor_ping', OPTS)]
class VNFMonitorPing(abstract_driver.VNFMonitorAbstractDriver):
def get_type(self):
return 'ping'
def get_name(self):
return 'ping'
def get_description(self):
return 'Tacker VNFMonitor Ping Driver'
def monitor_url(self, plugin, context, vnf):
LOG.debug('monitor_url %s', vnf)
return vnf.get('monitor_url', '')
def _is_pingable(self, mgmt_ip="", count=None, timeout=None,
interval=None, retry=None, **kwargs):
"""Checks whether an IP address is reachable by pinging.
Use linux utils to execute the ping (ICMP ECHO) command.
        By default it sends 5 packets with an interval of 1 second and a
        timeout of 5 seconds per packet. A runtime error implies the address
        is unreachable; otherwise it is considered pingable.
        :param mgmt_ip: IP address to check
:return: bool - True or string 'failure' depending on pingability.
"""
cmd_ping = 'ping'
if netaddr.valid_ipv6(mgmt_ip):
cmd_ping = 'ping6'
if not count:
count = cfg.CONF.monitor_ping.count
if not timeout:
timeout = cfg.CONF.monitor_ping.timeout
if not interval:
interval = cfg.CONF.monitor_ping.interval
if not retry:
retry = cfg.CONF.monitor_ping.retry
ping_cmd = [cmd_ping,
'-c', count,
'-W', timeout,
'-i', interval,
mgmt_ip]
for retry_range in range(int(retry)):
try:
linux_utils.execute(ping_cmd, check_exit_code=True)
return True
except RuntimeError:
LOG.warning("Cannot ping ip address: %s", mgmt_ip)
        return 'failure'
@log.log
def monitor_call(self, vnf, kwargs):
if not kwargs['mgmt_ip']:
return
return self._is_pingable(**kwargs)
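# Minimal usage sketch (illustrative only: assumes plain `ping` is available on
# PATH and the tacker agent utilities are importable; the address and numbers
# below are examples, not deployment defaults).
if __name__ == "__main__":
    driver = VNFMonitorPing()
    # All keywords passed explicitly so the oslo.config options above are not
    # consulted; returns True when reachable, the string 'failure' otherwise.
    print(driver._is_pingable(mgmt_ip="127.0.0.1", count=1, timeout=1,
                              interval=1, retry=1))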
|
Q2MM/q2mm
|
q2mm/filetypes.py
|
Python
|
mit
| 123,627
| 0.002912
|
#!/usr/bin/env python
"""
Handles importing data from the various filetypes that Q2MM uses.
Schrodinger
-----------
When importing Schrodinger files, if the atom.typ file isn't in the directory
where you execute the Q2MM Python scripts, you may see this warning:
WARNING mmat_get_atomic_num x is not a valid atom type
WARNING mmat_get_mmod_name x is not a valid atom type
In this example, x is the number of a custom atom type defined and added to
atom.typ. The warning can be ignored. If it's bothersome, copy atom.typ into
the directory where you execute the Q2MM Python scripts.
Note that the atom.typ must be located with your structure files, else the
Schrodinger jobs will fail.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from argparse import RawTextHelpFormatter
from string import digits
import logging
import mmap
import numpy as np
import math
import os
import re
import subprocess as sp
import time
import sys
try:
from schrodinger import structure as sch_str
from schrodinger.application.jaguar import input as jag_in
except:
print("Schrodinger not installed, limited functionality")
pass
import constants as co
import datatypes
logger = logging.getLogger(__name__)
# Print out full matrices rather than having Numpy truncate them.
# np.nan seems to no longer be supported for untruncated printing
# of arrays. The suggestion is to use sys.maxsize but I haven't checked
# that this works for python2 so leaving the commented code for now.
# np.set_printoptions(threshold=np.nan)
np.set_printoptions(threshold=sys.maxsize)
class File(object):
"""
Base for every other filetype class.
"""
def __init__(self, path):
self._lines = None
self.path = os.path.abspath(path)
# self.path = path
self.directory = os.path.dirname(self.path)
self.filename = os.path.basename(self.path)
# self.name = os.path.splitext(self.filename)[0]
@property
def lines(self):
if self._lines is None:
with open(self.path, 'r') as f:
self._lines = f.readlines()
return self._lines
def write(self, path, lines=None):
if lines is None:
lines = self.lines
with open(path, 'w') as f:
for line in lines:
f.write(line)
class AmberInput(File):
"""
Some sort of generic class for Amber shell scripts.
"""
SCRIPT_ENERGY = \
"""Energy of current thingy
&cntrl
ig=-1,
imin=1,
ncyc=0, maxcyc=0,
ntb=1,
&end
"""
def __init__(self, path):
super(AmberInput, self).__init__(path)
def run(self, path=None, inpcrd=None, prmtop=None, **kwargs):
# Added `**kwargs` to deal with this varying from the MacroModel run
# command.
with open('AMBER_TEMP.in', 'w') as f:
f.writelines(self.SCRIPT_ENERGY)
if not path:
# Seriously, this doesn't matter.
path = self.path
if not inpcrd:
inpcrd = os.path.join(self.directory, self.inpcrd)
if not prmtop:
prmtop = os.path.join(self.directory, self.prmtop)
# Could use inpcrd as the basis for the output filename?
path = os.path.splitext(prmtop)
path, ext = path[0], path[1]
self.out = path + '.out'
self.rst = path + '.rst'
# sp.call(
# 'sander -O -i AMBER_TEMP.in -o {} -c {} -p {} -r {} -ref {}'.format(
# self.out, inpcrd, prmtop, self.rst, inpcrd),
# shell=True)
class AmberOut(File):
"""
Some sort of generic class for Amber output files.
"""
LINE_HEADER = '\s+NSTEP\s+ENERGY\s+RMS\s+GMAX\s+NAME\s+NUMBER[\s+]?\n+'
def __init__(self, path):
super(AmberOut, self).__init__(path)
def read_energy(self, path=None):
if not path:
path = self.path
logger.log(1, '>>> path: {}'.format(path))
with open(path, 'r') as f:
string = f.read()
# No idea if this will find more than just this one. Sure hope not!
# I'd double check those energies.
# something = re.findall(
# '\s+FINAL\sRESULTS\s+\n+{}\s+{}\s+' \
# '(?P<energy>{})'.format(self.LINE_HEADER, co.RE_FLOAT, co.RE_FLOAT),
# string,
# re.MULTILINE)
# if something:
# logger.log(1, '>>> something: {}'.format(something))
# energy = float(something[-1])
# logger.log(1, '>>> energy: {}'.format(energy))
# return energy
# else:
# raise Exception("Awww bummer! I can't find the energy "
# "in {}!".format(path))
# Here's an iterative version.
re_compiled = re.compile('FINAL\sRESULTS\s+\n+{}\s+{}\s+'
'(?P<energy>{})'.format(
self.LINE_HEADER, co.RE_FLOAT, co.RE_FLOAT))
somethings = [x.groupdict() for x in re_compiled.finditer(string)]
if somethings:
logger.log(1, '>>> somethings: {}'.format(somethings))
energy = float(somethings[-1]['energy'])
            logger.log(1, '>>> energy: {}'.format(energy))
return energy
else:
raise Exception("Awww bummer! I can't find the energy "
"in {}!".format(path))
class TinkerHess(File):
def __init__(self, path):
super(TinkerHess, self).__init__(path)
self._hessian = None
self.natoms = None
@property
def hessian(self):
if self._hessian is None:
logger.log(10, 'READING: {}'.format(self.filename))
hessian = np.zeros([self.natoms * 3, self.natoms * 3], dtype=float)
            logger.log(5, ' -- Creating {} Hessian Matrix.'.format(
hessian.shape))
with open(self.path, 'r') as f:
lines = f.read()
words = lines.split()
diag = True
row_num = 0
col_num = 0
line = -1
index = 0
for i, word in enumerate(words):
match = re.compile('\d+[.]\d+').search(word)
# First group of values are all of the diagonal elements. So
# This will grab them first and put them in the correct index
# of the Hessian.
if diag and match:
hessian[row_num, col_num] = word
row_num += 1
col_num += 1
# After the first group of values the line will read
# 'Off-diagonal'. This signifies when the next elements are
# H_i, j for section i.
if word == 'Off-diagonal':
diag = False
line += 1
index = line + 1
row_num = 0
col_num = 0
if not diag and match:
hessian[line, col_num + index] = word
hessian[row_num + index, line] = word
row_num += 1
col_num += 1
# Convert hessian units to use kJ/mol instead of kcal/mol.
self._hessian = hessian / co.HARTREE_TO_KCALMOL \
* co.HARTREE_TO_KJMOL
logger.log(5, ' -- Finished Creating {} Hessian matrix.'.format(
hessian.shape))
return self._hessian
class TinkerLog(File):
def __init__(self, path):
super(TinkerLog, self).__init__(path)
self._structures = None
self.name = None
@property
def structures(self):
        if self._structures is None:
logger.log(10, 'READING: {}'.format(self.filename))
self._structures = []
with open(self.path, 'r') as f:
sections = {'sp':1, 'minimization':2, 'hessian':2}
count_previous = 0
calc_section = 'sp'
for line in f:
count_current = sections[calc_section]
if count_current != count_previous:
# Due to TINKER printin
|
superdesk/superdesk-core
|
tests/enqueue_test.py
|
Python
|
agpl-3.0
| 1,353
| 0.002217
|
from unittest.mock import patch
from superdesk.tests import TestCase
from apps.publish.enqueue.enqueue_service import EnqueueService
class NoTakesEnqueueTestCase(TestCase):
def setUp(self):
super().setUp()
self.product_ids = self.app.data.insert(
"products",
[
{"name": "all"},
],
)
self.subscriber_ids = self.app.data.insert(
"subscribers",
[
{"name": "digi", "subscriber_type": "digital", "is_targetable": True, "products": self.product_ids},
],
)
self.desk_ids = self.app.data.insert(
"desks",
[
{"name": "sports"},
],
)
self.service = EnqueueService()
def test_resend_no_takes(self):
doc = {"_id": "test"}
subscribers = [s for s in self.app.data.find_all("subscribers")]
subscriber_codes = self.service._get_subscriber_codes(subscribers)
with patch.object(self.service, "_resend_to_subscribers") as resend:
with patch.object(self.service, "publish_content_api") as content_api:
self.service.resend(doc, subscribers)
resend.assert_called_with(doc, subscribers, subscriber_codes, {})
content_api.assert_called_with(doc, [])
|
arista-eosext/rphm
|
setup.py
|
Python
|
bsd-3-clause
| 3,002
| 0.001332
|
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
''' The setup script is the center of all activity in building,
distributing, and installing modules using the Distutils. The
main purpose of the setup script is to describe your module
    distribution to the Distutils, so that the various commands
that operate on your modules do the right thing.
'''
import os
from glob import glob
from setuptools import setup, find_packages
from rphm import __version__, __author__
def find_modules(pkg):
''' Find the modules that belong in this package. '''
modules = [pkg]
for dirname, dirnames, _ in os.walk(pkg):
for subdirname in dirnames:
            modules.append(os.path.join(dirname, subdirname))
return modules
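# Example (hypothetical layout): for a tree containing rphm/ and rphm/drivers/,
# find_modules('rphm') returns ['rphm', 'rphm/drivers'], i.e. the package and
# every sub-directory beneath it, which is then fed to setup(packages=...).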
INSTALL_ROOT = os.getenv('VIRTUAL_ENV', '')
CONF_PATH = INSTALL_ROOT + '/persist/sys'
INSTALL_REQUIREMENTS = [
'jsonrpclib'
]
TEST_REQUIREMENTS = [
'mock'
]
setup(
name='rphm',
version=__version__,
description='EOS extension to generate SNMP traps based on counter thresholds',
long_description=open('README.md').read(),
author=__author__,
author_email='eosplus-dev@arista.com',
url='http://eos.arista.com',
license='BSD-3',
install_requires=INSTALL_REQUIREMENTS,
tests_require=TEST_REQUIREMENTS,
packages=find_modules('rphm'),
scripts=glob('bin/*'),
data_files=[
(CONF_PATH, ['conf/rphm.conf'])
]
)
|
ooz/ICFP2015
|
src/game.py
|
Python
|
mit
| 8,981
| 0.001225
|
#!/usr/bin/python
# coding: utf-8
import copy
import json
from lcg import LCG
class Game(object):
def __init__(self, json_file):
super(Game, self).__init__()
with open(json_file) as f:
json_data = json.load(f)
self.ID = json_data["id"]
self.units = [Unit(json_unit) for json_unit in json_data["units"]]
self.width = json_data["width"]
self.height = json_data["height"]
self.filled = [json_cell2tuple(json_c) for json_c in json_data["filled"]]
self.sourceLength = json_data["sourceLength"]
self.sourceSeeds = json_data["sourceSeeds"]
self.solutions = []
def solve(self, ss_nr=0):
for ss in self.sourceSeeds:
commands = ""
board = Board(self.width, self.height, self.filled)
source = self.generate_source(ss, self.sourceLength)
while not board.is_finished() and len(source) > 0:
unit = source.pop(0)
board.spawn(unit)
if not board.is_finished():
while board.unit is not None:
commands += board.move_ei()
#commands += board.move_to_lowest_fill_east()
# Move down
commands += board.move_down()
#print "----------------------"
#print board
#print "Fill lvl: %s" % board.get_fill_level(4)
solution = {}
solution["problemId"] = self.ID
solution["seed"] = ss
solution["tag"] = "Algo v3.1"
solution["solution"] = commands
self.solutions.append(solution)
return json.dumps(self.solutions)
def generate_source(self, seed, sourceLength):
source = []
rng = LCG(seed)
for i in range(sourceLength):
unit_nr = rng.next() % len(self.units)
unit = copy.deepcopy(self.units[unit_nr])
source.append(unit)
return source
def __str__(self):
return "Game(ID:%s)" % self.ID
class Unit(object):
MV_VALID = 0 # Valid move
MV_LOCKED = 1 # Move would cause a lock
MV_INVALID = 2 # Invalid move, can't move there (out of the board etc.)
def __init__(self, json_unit):
super(Unit, self).__init__()
self.pivot = json_cell2tuple(json_unit["pivot"])
self.members = [json_cell2tuple(json_m) for json_m in json_unit["members"]]
self.pos = Cell(0, 0)
def get_topmost(self):
tm = self.members[0]
for m in self.members:
if m.y < tm.y:
tm = m
return tm
def get_leftmost(self):
lm = self.members[0]
for m in self.members:
if m.x < lm.x:
lm = m
return lm
def get_rightmost(self):
rm = self.members[0]
for m in self.members:
if m.x > rm.x:
rm = m
return rm
def can_be_placed(self, board):
for m in self.members:
try:
if board.get(self.pos.x + m.x, self.pos.y + m.y) in [1, 2, 3]:
return Unit.MV_LOCKED
except IndexError:
return Unit.MV_INVALID
return Unit.MV_VALID
def move_e(self):
self.pos.x = self.pos.x - 1
def move_w(self):
self.pos.x = self.pos.x + 1
def move_se(self):
if self.pos.y % 2 == 0:
self.pos.x = self.pos.x - 1
self.pos.y = self.pos.y + 1
def move_sw(self):
if self.pos.y % 2 == 1:
self.pos.x = self.pos.x + 1
self.pos.y = self.pos.y + 1
def turn_cw(self):
pass
def turn_ccw(self):
pass
def __str__(self):
return "Unit(pivot:%s, members:%s)" % (self.pivot, self.members)
class Cell(object):
def __init__(self, x, y):
super(Cell, self).__init__()
self.x = x
self.y = y
def __str__(self):
return "(%s, %s)" % (self.x, self.y)
def json_cell2tuple(json_cell):
return Cell(json_cell["x"], json_cell["y"])
class Board(object):
def __init__(self, width, height, filled):
super(Board, self).__init__()
self.width = width
self.height = height
self._board = [[0] * height for x in range(width)]
for full in filled:
self._board[full.x][full.y] = 1
self.unit = None
self.finished = False
def spawn(self, unit):
tm = unit.get_topmost()
lm = unit.get_leftmost()
rm = unit.get_rightmost()
pad_top = 0
pad_left = (self.width - (rm.x - lm.x + 1)) / 2
unit.pos = Cell(pad_left, pad_top)
if unit.can_be_placed(self) == Unit.MV_VALID:
self.unit = unit
else:
self.finished = True
def lock(self):
if self.unit:
for m in self.unit.members:
self._board[m.x + self.unit.pos.x][m.y + self.unit.pos.y] = 1
self.unit = None
self.clear_rows()
def clear_rows(self):
# UGLY AS HELL
for y in range(self.height)[::-1]:
while self.row_is_filled(y):
for x in range(self.width):
self._board[x][y] = 0
for yy in range(y)[::-1]:
for x in range(self.width):
self._board[x][yy + 1] = self.get(x, yy)
def row_is_filled(self, row):
summ = 0
for x in range(self.width):
summ += self.get(x, row)
if summ >= self.width:
return True
return False
def is_finished(self):
return self.finished
def get_adjacent(self, x, y):
return []
def get_fill_level(self, col):
for y in range(self.height):
if self.get(col, y) in [1]:
return y
return self.height - 1
def get(self, x, y):
return self._board[x][y]
def __str__(self):
board_copy = copy.deepcopy(self._board)
if self.unit:
for m in self.unit.members:
board_copy[m.x + self.unit.pos.x][m.y + self.unit.pos.y] = 2
buf = []
for y in range(self.height):
line = ""
if y % 2 == 1:
line = " "
for x in range(self.width):
                line = line + str(board_copy[x][y]) + " "
buf.append(line)
return "\n".join(buf)
# TODO: refactor movement code
def move_e(self):
if self.unit is None:
return ""
unit_copy = copy.deepcopy(self.unit)
unit_copy.move_e()
cbp = unit_copy.can_be_placed(self)
if cbp == Unit.MV_VALID:
self.unit.move_e()
return "e"
elif cbp == Unit.MV_LOCKED:
self.lock()
return "c"
else:
return ""
def move_w(self):
if self.unit is None:
return ""
unit_copy = copy.deepcopy(self.unit)
unit_copy.move_w()
cbp = unit_copy.can_be_placed(self)
if cbp == Unit.MV_VALID:
self.unit.move_w()
return "!"
elif cbp == Unit.MV_LOCKED:
self.lock()
return "!"
else:
return ""
def move_se(self):
if self.unit is None:
return ""
unit_copy = copy.deepcopy(self.unit)
unit_copy.move_se()
cbp = unit_copy.can_be_placed(self)
if cbp == Unit.MV_VALID:
self.unit.move_se()
return "m"
else:
self.lock()
return "n"
def move_sw(self):
if self.unit is None:
return ""
unit_copy = copy.deepcopy(self.unit)
unit_copy.move_sw()
cbp = unit_copy.can_be_placed(self)
if cbp == Unit.MV_VALID:
self.unit.move_sw()
return "i"
else:
self.lock()
return "j"
# Macro movements
def move_down(self):
commands = ""
while self.unit is not None:
if self.unit.pos.y
|
ncdesouza/bookworm
|
env/lib/python2.7/site-packages/pip/index.py
|
Python
|
gpl-3.0
| 40,408
| 0.002203
|
"""Routines related to PyPI, indexes"""
import sys
import os
import re
import mimetypes
import posixpath
from pip.log import logger
from pip.util import Inf, normalize_name, splitext, is_prerelease
from pip.exceptions import (DistributionNotFound, BestVersionAlreadyInstalled,
InstallationError, InvalidWheelFilename, UnsupportedWheel)
from pip.backwardcompat import urlparse, url2pathname
from pip.download import PipSession, url_to_path, path_to_url
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
from pip._vendor import html5lib, requests, pkg_resources
from pip._vendor.requests.exceptions import SSLError
__all__ = ['PackageFinder']
DEFAULT_MIRROR_HOSTNAME = "last.pypi.python.org"
INSECURE_SCHEMES = {
"http": ["https"],
}
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links
"""
def __init__(self, find_links, index_urls,
use_wheel=True, allow_external=[], allow_unverified=[],
allow_all_external=False, allow_all_prereleases=False,
process_dependency_links=False, session=None):
self.find_links = find_links
self.index_urls = index_urls
self.dependency_links = []
self.cache = PageCache()
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.use_wheel = use_wheel
# Do we allow (safe and verifiable) externally hosted files?
self.allow_external = set(normalize_name(n) for n in allow_external)
# Which names are allowed to install insecure and unverifiable files?
self.allow_unverified = set(
normalize_name(n) for n in allow_unverified
)
# Anything that is allowed unverified is also allowed external
self.allow_external |= self.allow_unverified
# Do we allow all (safe and verifiable) externally hosted files?
self.allow_all_external = allow_all_external
# Stores if we ignored any external links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_external = False
# Stores if we ignored any unsafe links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_unverified = False
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
self._have_warned_dependency_links = False
# The Session we'll use to make requests
self.session = session or PipSession()
def add_dependency_links(self, links):
## FIXME: this shouldn't be global list this, it should only
## apply to requirements of the package that specifies the
## dependency_links value
## FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
if not self._have_warned_dependency_links:
logger.deprecated(
"1.6",
"Dependency Links processing has been deprecated with an "
"accelerated time schedule and will be removed in pip 1.6",
)
self._have_warned_dependency_links = True
self.dependency_links.extend(links)
def _sort_locations(self, locations):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
is_find_link = url in self.find_links
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if is_find_link and os.path.isdir(path):
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url and os.path.isdir(path):
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
def _link_sort_key(self, link_tuple):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
parsed_version, link, _ = link_tuple
if self.use_wheel:
support_num = len(supported_tags)
if link == INSTALLED_VERSION:
pri = 1
elif link.ext == wheel_ext:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel("%s is not a supported wheel for this platform. It can't be sorted." % wheel.filename)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (parsed_version, pri)
else:
return parsed_version
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the existing ordering as secondary.
See the docstring for `_link_sort_key` for details.
This function is isolated for easier unit testing.
"""
return sorted(applicable_versions, key=self._link_sort_key, reverse=True)
def find_requirement(self, req, upgrade):
def mkurl_pypi_url(url):
loc = posixpath.join(url, url_name)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
url_name = req.url_name
# Only check main index if index URL is given:
main_index_url = None
if self.index_urls:
# Check that we have the url_name correctly spelled:
main_index_url = Link(mkurl_pypi_url(self.index_urls[0]), trusted=True)
# This will also cache the page, so it's okay that we get it again later:
page = self._get_page(main_index_url, req)
if page is None:
url_name = self._find_url_name(Link(self.index_urls[0], trusted=True), url_name, req) or req.url_name
if url_name is not None:
locations = [
mkurl_pypi_url(url)
for url in self.index_urls] + self.find_links
else:
locations = list(self.find_links)
for version in req.absolute_versions:
if url_name is not None and main_index_url is not None:
locations = [
posixpath.join(main_in
Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/core/management/commands/createcachetable.py | Python | artistic-2.0 | 4,389 | 0.00319
from django.conf import settings
from django.core.cache import caches
from django.core.cache.backends.db import BaseDatabaseCache
from django.core.management.base import BaseCommand, CommandError
from django.db import (
DEFAULT_DB_ALIAS, connections, models, router, transaction,
)
from django.db.utils import DatabaseError
from django.utils.encoding import force_text
class Command(BaseCommand):
help = "Creates the tables needed to use the SQL cache backend."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('args', metavar='table_name', nargs='*',
help='Optional table names. Otherwise, settings.CACHES is used to '
'find cache tables.')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Nominates a database onto which the cache tables will be '
'installed. Defaults to the "default" database.')
parser.add_argument('--dry-run', action='store_true', dest='dry_run',
help='Does not create the table, just prints the SQL that would '
'be run.')
def handle(self, *tablenames, **options):
db = options.get('database')
self.verbosity = int(options.get('verbosity'))
dry_run = options.get('dry_run')
if len(tablenames):
# Legacy behavior, tablename specified as argument
for tablename in tablenames:
self.create_table(db, tablename, dry_run)
else:
for cache_alias in settings.CACHES:
cache = caches[cache_alias]
if isinstance(cache, BaseDatabaseCache):
self.create_table(db, cache._table, dry_run)
def create_table(self, database, tablename, dry_run):
cache = BaseDatabaseCache(tablename, {})
if not router.allow_migrate_model(database, cache.cache_model_class):
return
connection = connections[database]
if tablename in connection.introspection.table_names():
if self.verbosity > 0:
self.stdout.write("Cache table '%s' already exists." % tablename)
return
fields = (
# "key" is a reserved word in MySQL, so use "cache_key" instead.
models.CharField(name='cache_key', max_length=255, unique=True, primary_key=True),
models.TextField(name='value'),
models.DateTimeField(name='expires', db_index=True),
)
table_output = []
index_output = []
qn = connection.ops.quote_name
for f in fields:
field_output = [qn(f.name), f.db_type(connection=connection)]
field_output.append("%sNULL" % ("NOT " if not f.null else ""))
if f.primary_key:
field_output.append("PRIMARY KEY")
elif f.unique:
field_output.append("UNIQUE")
if f.db_index:
unique = "UNIQUE " if f.unique else ""
index_output.append("CREATE %sINDEX %s ON %s (%s);" %
(unique, qn('%s_%s' % (tablename, f.name)), qn(tablename),
qn(f.name)))
table_output.append(" ".join(field_output))
full_statement = ["CREATE TABLE %s (" % qn(tablename)]
for i, line in enumerate(table_output):
full_statement.append(' %s%s' % (line, ',' if i < len(table_output) - 1 else ''))
full_statement.append(');')
full_statement = "\n".join(full_statement)
if dry_run:
self.stdout.write(full_statement)
for statement in index_output:
self.stdout.write(statement)
return
with transaction.atomic(using=database,
savepoint=connection.features.can_rollback_ddl):
with connection.cursor() as curs:
try:
curs.execute(full_statement)
except DatabaseError as e:
raise CommandError(
"Cache table '%s' could not be created.\nThe error was: %s." %
(tablename, force_text(e)))
for statement in index_output:
curs.execute(statement)
if self.verbosity > 1:
self.stdout.write("Cache table '%s' created." % tablename)
tsl143/addons-server | src/olympia/reviewers/tests/test_models.py | Python | bsd-3-clause | 63,568 | 0.000031
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import json
import mock
import time
from django.conf import settings
from django.core import mail
from olympia import amo
from olympia.abuse.models import AbuseReport
from olympia.access.models import Group, GroupUser
from olympia.activity.models import ActivityLog
from olympia.amo.tests import TestCase
from olympia.amo.tests import (
addon_factory, file_factory, user_factory, version_factory)
from olympia.addons.models import (
Addon, AddonApprovalsCounter, AddonReviewerFlags, AddonUser)
from olympia.files.models import FileValidation
from olympia.ratings.models import Rating
from olympia.reviewers.models import Whiteboard
from olympia.versions.models import (
Version, version_uploaded)
from olympia.files.models import File, WebextPermission
from olympia.reviewers.models import (
AutoApprovalNotEnoughFilesError, AutoApprovalNoValidationResultError,
AutoApprovalSummary, RereviewQueueTheme, ReviewerScore,
ReviewerSubscription, send_notifications, set_reviewing_cache,
ViewFullReviewQueue, ViewPendingQueue, ViewUnlistedAllList)
from olympia.users.models import UserProfile
def create_search_ext(name, version_str, addon_status, file_status,
channel):
addon, created_ = Addon.objects.get_or_create(
name__localized_string=name,
defaults={'type': amo.ADDON_SEARCH, 'name': name})
version, created_ = Version.objects.get_or_create(
addon=addon, version=version_str, defaults={'channel': channel})
File.objects.create(version=version, filename=u"%s.xpi" % name,
platform=amo.PLATFORM_ALL.id, status=file_status)
# Update status *after* there are files:
addon = Addon.objects.get(pk=addon.id)
addon.update(status=addon_status)
return addon
class TestQueue(TestCase):
"""Tests common attributes and coercions that each view must support."""
__test__ = False # this is an abstract test case
def test_latest_version(self):
addon = self.new_addon()
v1 = addon.find_latest_version(self.channel)
v1.update(created=self.days_ago(2))
v1.all_files[0].update(status=amo.STATUS_PUBLIC)
version_factory(addon=addon, version='2.0', created=self.days_ago(1),
channel=self.channel,
file_kw={'status': amo.STATUS_PUBLIC})
version_factory(addon=addon, version='3.0', created=self.days_ago(0),
channel=self.channel,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
row = self.Queue.objects.get()
assert row.latest_version == '3.0'
def test_addons_disabled_by_user_are_hidden(self):
self.new_addon(version=u'0.1').update(disabled_by_user=True)
assert list(self.Queue.objects.all()) == []
def test_addons_disabled_by_admin_are_hidden(self):
self.new_addon(version=u'0.1').update(status=amo.STATUS_DISABLED)
assert list(self.Queue.objects.all()) == []
def test_reviewed_files_are_hidden(self):
self.new_addon(name='Unreviewed')
addon_factory(name='Already Reviewed')
assert sorted(q.addon_name for q in self.Queue.objects.all()) == (
['Unreviewed'])
def test_search_extensions(self):
self.new_search_ext('Search Tool', '0.1')
row = self.Queue.objects.get()
assert row.addon_name == u'Search Tool'
assert row.addon_type_id == amo.ADDON_SEARCH
def test_count_all(self):
# Create two new addons and give each another version.
version_factory(addon=self.new_addon(), version=u'2.0',
channel=self.channel)
version_factory(addon=self.new_addon(), version=u'2.0',
channel=self.channel)
assert self.Queue.objects.all().count() == 2
class TestPendingQueue(TestQueue):
__test__ = True
Queue = ViewPendingQueue
channel = amo.RELEASE_CHANNEL_LISTED
def new_addon(self, name=u'Pending', version=u'1.0'):
"""Creates an approved addon with two listed versions, one approved,
the second awaiting review."""
addon = addon_factory(
name=name,
version_kw={'version': u'0.0.1', 'channel': self.channel,
'created': self.days_ago(1)})
version_factory(
addon=addon, version=version, channel=self.channel,
file_kw={'status': amo.STATUS_AWAITING_REVIEW,
'is_restart_required': False})
return addon
def new_search_ext(self, name, version, **kw):
return create_search_ext(name, version,
amo.STATUS_PUBLIC, amo.STATUS_AWAITING_REVIEW,
channel=self.channel, **kw)
def test_waiting_time(self):
self.new_addon()
Version.objects.update(created=datetime.utcnow())
row = self.Queue.objects.all()[0]
assert row.waiting_time_days == 0
# Time zone will be off, hard to test this.
assert row.waiting_time_hours is not None
def test_flags_needs_admin_code_review(self):
AddonReviewerFlags.objects.create(
addon=self.new_addon(), needs_admin_code_review=True)
q = self.Queue.objects.get()
assert q.flags == [
('needs-admin-code-review', 'Needs Admin Code Review')]
def test_flags_info_request(self):
self.new_addon().find_latest_version(self.channel).update(
has_info_request=True)
q = self.Queue.objects.get()
assert q.flags == [('info', 'More Information Requested')]
def test_flags_reviewer_comment(self):
self.new_addon().find_latest_version(self.channel).update(
has_reviewer_comment=True)
q = self.Queue.objects.get()
assert q.flags == [('reviewer', 'Contains Reviewer Comment')]
def test_flags_jetpack(self):
self.new_addon().find_latest_version(self.channel).all_files[0].update(
jetpack_version='1.8')
q = self.Queue.objects.get()
assert q.flags == [('jetpack', 'Jetpack Add-on')]
def test_flags_is_restart_required(self):
self.new_addon().find_latest_version(self.channel).all_files[0].update(
is_restart_required=True)
q = self.Queue.objects.get()
assert q.flags == [('is_restart_required', 'Requires Restart')]
def test_flags_sources_provided(self):
self.new_addon().find_latest_version(self.channel).update(
source='/some/source/file')
q = self.Queue.objects.get()
assert q.flags == [('sources-provided', 'Sources provided')]
def test_flags_webextension(self):
self.new_addon().find_latest_version(self.channel).all_files[0].update(
is_webextension=True)
queue = self.Queue.objects.get()
assert queue.flags == [('webextension', 'WebExtension')]
def test_no_flags(self):
self.new_addon()
q = self.Queue.objects.get()
assert q.flags == []
class TestFullReviewQueue(TestQueue):
__test__ = True
Queue = ViewFullReviewQueue
channel = amo.RELEASE_CHANNEL_LISTED
def new_addon(self, name=u'Nominated', version=u'1.0',
addon_status=amo.STATUS_NOMINATED,
file_status=amo.STATUS_AWAITING_REVIEW):
addon = addon_factory(
name=name, status=addon_status,
version_kw={'version': version, 'channel': self.channel},
file_kw={'status': file_status})
return addon
def new_search_ext(self, name, version, **kw):
return create_search_ext(name, version,
amo.STATUS_NOMINATED,
amo.STATUS_AWAITING_REVIEW,
channel=self.channel, **kw)
def test_waiting_time(self):
self.new_addon()
Version.objects.update(nomination=datetime.utcnow())
row = self.Queue.objects.all()[0]
assert row.waiting_time_days == 0
# Time zone will be off, hard to test this.
assert row.waiting_time_hours is
michelp/xodb | xodb/snowball/french/__init__.py | Python | mit | 2,426 | 0
# -*- coding: utf-8 -*-
stopwords = """
| A French stop word list. Comments begin with vertical bar. Each stop
| word is at the start of a line.
au | a + le
aux | a + les
avec | with
ce | this
ces | these
dans | with
de | of
des | de + les
du | de + le
elle | she
en | `of them' etc
et | and
eux | them
il | he
je | I
la | the
le | the
leur | their
lui | him
ma | my (fem)
mais | but
me | me
même | same; as in moi-même (myself) etc
mes | me (pl)
moi | me
mon | my (masc)
ne | not
nos | our (pl)
notre | our
nous | we
on | one
ou | where
par | by
pas | not
pour | for
qu | que before vowel
que | that
qui | who
sa | his, her (fem)
se | oneself
ses | his (pl)
son | his, her (masc)
sur | on
ta | thy (fem)
te | thee
tes | thy (pl)
toi | thee
ton | thy (masc)
tu | thou
un | a
une | a
vos | your (pl)
votre | your
vous | you
| single letter forms
c | c'
d | d'
j | j'
l | l'
à | to, at
m | m'
n | n'
s | s'
t | t'
y | there
| forms of être (not including the infinitive):
été
étée
étées
étés
étant
étante
étants
étantes
suis
es
est
sommes
êtes
sont
serai
seras
sera
serons
serez
seront
serais
serait
serions
seriez
seraient
étais
était
étions
étiez
étaient
fus
fut
fûmes
fûtes
furent
sois
soit
soyons
soyez
soient
fusse
fusses
fût
fussions
fussiez
fussent
| forms of avoir (not including the infinitive):
ayant
ayante
ayantes
ayants
eu
eue
eues
eus
ai
as
avons
avez
ont
aurai
auras
aura
aurons
aurez
auront
aurais
aurait
aurions
auriez
auraient
avais
avait
avions
aviez
avaient
eut
eûmes
eûtes
eurent
aie
aies
ait
ayons
ayez
aient
eusse
eusses
eût
eussions
eussiez
eussent
"""
libracore/erpnext | erpnext/hr/doctype/leave_allocation/leave_allocation.py | Python | gpl-3.0 | 9,271 | 0.02373
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, date_diff, formatdate, add_days, today, getdate
from frappe import _
from frappe.model.document import Document
from erpnext.hr.utils import set_employee_name, get_leave_period
from erpnext.hr.doctype.leave_ledger_entry.leave_ledger_entry import expire_allocation, create_leave_ledger_entry
class OverlapError(frappe.ValidationError): pass
class BackDatedAllocationError(frappe.ValidationError): pass
class OverAllocationError(frappe.ValidationError): pass
class LessAllocationError(frappe.ValidationError): pass
class ValueMultiplierError(frappe.ValidationError): pass
class LeaveAllocation(Document):
def validate(self):
self.validate_period()
self.validate_new_leaves_allocated_value()
self.validate_allocation_overlap()
self.validate_back_dated_allocation()
self.set_total_leaves_allocated()
self.validate_total_leaves_allocated()
self.validate_lwp()
set_employee_name(self)
self.validate_leave_allocation_days()
def validate_leave_allocation_days(self):
company = frappe.db.get_value("Employee", self.employee, "company")
leave_period = get_leave_period(self.from_date, self.to_date, company)
max_leaves_allowed = frappe.db.get_value("Leave Type", self.leave_type, "max_leaves_allowed")
if max_leaves_allowed > 0:
leave_allocated = 0
if leave_period:
leave_allocated = get_leave_allocation_for_period(self.employee, self.leave_type,
leave_period[0].from_date, leave_period[0].to_date)
leave_allocated += self.new_leaves_allocated
if leave_allocated > max_leaves_allowed:
frappe.throw(_("Total allocated leaves are more days than maximum allocation of {0} leave type for employee {1} in the period")
.format(self.leave_type, self.employee))
def on_submit(self):
self.create_leave_ledger_entry()
# expire all unused leaves in the ledger on creation of carry forward allocation
allocation = get_previous_allocation(self.from_date, self.leave_type, self.employee)
if self.carry_forward and allocation:
expire_allocation(allocation)
def on_cancel(self):
self.create_leave_ledger_entry(submit=False)
if self.carry_forward:
self.set_carry_forwarded_leaves_in_previous_allocation(on_cancel=True)
def validate_period(self):
if date_diff(self.to_date, self.from_date) <= 0:
frappe.throw(_("To date cannot be before from date"))
def validate_lwp(self):
if frappe.db.get_value("Leave Type", self.leave_type, "is_lwp"):
frappe.throw(_("Leave Type {0} cannot be allocated since it is leave without pay").format(self.leave_type))
def validate_new_leaves_allocated_value(self):
"""validate that leave allocation is in multiples of 0.5"""
if flt(self.new_leaves_allocated) % 0.5:
frappe.throw(_("Leaves must be allocated in multiples of 0.5"), ValueMultiplierError)
def validate_allocation_overlap(self):
leave_allocation = frappe.db.sql("""
select name from `tabLeave Allocation`
where employee=%s and leave_type=%s and docstatus=1
and to_date >= %s and from_date <= %s""",
(self.employee, self.leave_type, self.from_date, self.to_date))
if leave_allocation:
frappe.msgprint(_("{0} already allocated for Employee {1} for period {2} to {3}")
.format(self.leave_type, self.employee, formatdate(self.from_date), formatdate(self.to_date)))
frappe.throw(_('Reference') + ': <a href="#Form/Leave Allocation/{0}">{0}</a>'
.format(leave_allocation[0][0]), OverlapError)
def validate_back_dated_allocation(self):
future_allocation = frappe.db.sql("""select name, from_date from `tabLeave Allocation`
where employee=%s and leave_type=%s and docstatus=1 and from_date > %s
and carry_forward=1""", (self.employee, self.leave_type, self.to_date), as_dict=1)
if future_allocation:
frappe.throw(_("Leave cannot be allocated before {0}, as leave balance has already been carry-forwarded in the future leave allocation record {1}")
.format(formatdate(future_allocation[0].from_date), future_allocation[0].name),
BackDatedAllocationError)
def set_total_leaves_allocated(self):
self.unused_leaves = get_carry_forwarded_leaves(self.employee,
self.leave_type, self.from_date, self.carry_forward)
self.total_leaves_allocated = flt(self.unused_leaves) + flt(self.new_leaves_allocated)
self.limit_carry_forward_based_on_max_allowed_leaves()
if self.carry_forward:
self.set_carry_forwarded_leaves_in_previous_allocation()
if not self.total_leaves_allocated \
and not frappe.db.get_value("Leave Type", self.leave_type, "is_earned_leave") \
and not frappe.db.get_value("Leave Type", self.leave_type, "is_compensatory"):
frappe.throw(_("Total leaves allocated is mandatory for Leave Type {0}")
.format(self.leave_type))
def limit_carry_forward_based_on_max_allowed_leaves(self):
max_leaves_allowed = frappe.db.get_value("Leave Type", self.leave_type, "max_leaves_allowed")
if max_leaves_allowed and self.total_leaves_allocated > flt(max_leaves_allowed):
self.total_leaves_allocated = flt(max_leaves_allowed)
self.unused_leaves = max_leaves_allowed - flt(self.new_leaves_allocated)
def set_carry_forwarded_leaves_in_previous_allocation(self, on_cancel=False):
''' Set carry forwarded leaves in previous allocation '''
previous_allocation = get_previous_allocation(self.from_date, self.leave_type, self.employee)
if on_cancel:
self.unused_leaves = 0.0
if previous_allocation:
frappe.db.set_value("Leave Allocation", previous_allocation.name,
'carry_forwarded_leaves_count', self.unused_leaves)
def validate_total_leaves_allocated(self):
# Adding a day to include To Date in the difference
date_difference = date_diff(self.to_date, self.from_date) + 1
if date_difference < self.total_leaves_allocated:
frappe.throw(_("Total allocated leaves are more than days in the period"), OverAllocationError)
def create_leave_ledger_entry(self, submit=True):
if self.unused_leaves:
expiry_days = frappe.db.get_value("Leave Type", self.leave_type, "expire_carry_forwarded_leaves_after_days")
end_date = add_days(self.from_date, expiry_days - 1) if expiry_days else self.to_date
args = dict(
leaves=self.unused_leaves,
from_date=self.from_date,
to_date= min(getdate(end_date), getdate(self.to_date)),
is_carry_forward=1
)
create_leave_ledger_entry(self, args, submit)
args = dict(
leaves=self.new_leaves_allocated,
from_date=self.from_date,
to_date=self.to_date,
is_carry_forward=0
)
create_leave_ledger_entry(self, args, submit)
def get_previous_allocation(from_date, leave_type, employee):
''' Returns document properties of previous allocation '''
return frappe.db.get_value("Leave Allocation",
filters={
'to_date': ("<", from_date),
'leave_type': leave_type,
'employee': employee,
'docstatus': 1
},
order_by='to_date DESC',
fieldname=['name', 'from_date', 'to_date', 'employee', 'leave_type'], as_dict=1)
def get_leave_allocation_for_period(employee, leave_type, from_date, to_date):
leave_allocated = 0
leave_allocations = frappe.db.sql("""
select employee, leave_type, from_date, to_date, total_leaves_allocated
from `tabLeave Allocation`
where employee=%(employee)s and leave_type=%(leave_type)s
and docstatus=1
and (from_date between %(from_date)s and %(to_date)s
or to_date between %(from_date)s and %(to_date)s
or (from_date < %(from_date)s and to_date > %(to_date)s))
""", {
"from_date": from_date,
"to_date": to_date,
"employee": employee,
"leave_type": leave_type
}, as_dict=1)
if leave_allocations:
for leave_alloc in leave_allocations:
leave_allocated += leave_alloc.total_leaves_allocated
return leave_allocated
@frappe.whitelist()
def get_carry_forwarded_leaves(employee, leave_type, date, carry_forward=None):
''' Returns carry forwarded leaves for the given employee '''
unused_leaves = 0.0
previous_allocation = get_previous_allocation(date, leave_type, employee)
if carry_forward and previous_allocation:
validate_carry_forward(lea
Brain888/OpenMineMods | GUI/Downloader.py | Python | agpl-3.0 | 4,748 | 0.001053
from PyQt5.QtCore import QThread, pyqtSignal
from API.CurseAPI import CurseAPI, CurseFile, CurseModpack
from PyQt5.QtWidgets import *
from GUI.Strings import Strings
strings = Strings()
translate = strings.get
class FileDownloaderWindow(QWidget):
def __init__(self, file: str, curse: CurseAPI, path: str, fname=False, callback=False):
super().__init__()
self.callback = callback
self.setWindowTitle(translate("downloading.update"))
self.layout = QVBoxLayout(self)
self.progress = QProgressBar()
self.layout.addWidget(self.progress)
self.show()
self.downloader = FileDownloaderThread(file, curse, path, fname)
self.downloader.done.connect(self.download_done)
self.downloader.update.connect(self.progress.setValue)
self.download_thread = QThread()
self.downloader.moveToThread(self.download_thread)
self.download_thread.started.connect(self.downloader.download)
self.download_thread.start()
def download_done(self):
if self.callback:
self.callback()
self.close()
self.destroy()
class FileDownloaderThread(QThread):
done = pyqtSignal()
update = pyqtSignal(int, name="ping")
def __init__(self, file: str, curse: CurseAPI, path: str, fname: str):
super().__init__()
self.file = file
self.path = path
self.fname = fname
self.curse = curse
def download(self):
self.curse.download_file(self.file, self.path, self.fname, self.update.emit)
self.done.emit()
class ModDownloaderWindow(QWidget):
def __init__(self, file: CurseFile, curse: CurseAPI, instance, initmods):
super().__init__()
self.initmods = initmods
self.setWindowTitle(translate("downloading.mod").format(file.name))
self.layout = QVBoxLayout(self)
self.progress = QProgressBar()
self.layout.addWidget(self.progress)
self.show()
self.downloader = ModDownloaderThread(file, curse, instance)
self.downloader.done.connect(self.download_done)
self.downloader.update.connect(self.progress.setValue)
self.download_thread = QThread()
self.downloader.moveToThread(self.download_thread)
self.download_thread.started.connect(self.downloader.download)
self.download_thread.start()
def download_done(self):
self.downloader.terminate()
self.download_thread.terminate()
self.initmods()
self.close()
self.destroy()
class ModDownloaderThread(QThread):
done = pyqtSignal()
update = pyqtSignal(int, name="ping")
def __init__(self, file: CurseFile, curse: CurseAPI, instance):
super().__init__()
self.file = file
self.curse = curse
self.instance = instance
def download(self):
self.instance.install_mod(self.file, self.curse, True, self.update.emit)
self.done.emit()
self.terminate()
class PackDownloaderWindow(QWidget):
def __init__(self, file: CurseFile, curse: CurseAPI, pack: CurseModpack):
super().__init__()
self.setWindowTitle(translate("downloading.pack").format(pack.project.title))
self.layout = QVBoxLayout(self)
self.label = QLabel()
self.layout.addWidget(self.label)
self.progress = QProgressBar()
self.layout.addWidget(self.progress)
self.prog2 = QProgressBar()
self.layout.addWidget(self.prog2)
self.show()
self.downloader = PackDownloaderThread(file, curse, pack)
self.downloader.done.connect(self.download_done)
self.downloader.bar1.connect(self.progress.setValue)
self.downloader.bar2.connect(self.prog2.setValue)
self.downloader.setLabel.connect(self.label.setText)
self.download_thread = QThread()
self.downloader.moveToThread(self.download_thread)
self.download_thread.started.connect(self.downloader.download)
self.download_thread.start()
def download_done(self):
self.downloader.terminate()
self.download_thread.terminate()
self.close()
self.destroy()
class PackDownloaderThread(QThread):
done = pyqtSignal()
setLabel = pyqtSignal(str, name="label")
bar1 = pyqtSignal(int, name="bar1")
bar2 = pyqtSignal(int, name="bar2")
def __init__(self, file: CurseFile, curse: CurseAPI, pack: CurseModpack):
super().__init__()
self.file = file
self.curse = curse
self.pack = pack
def download(self):
self.pack.install(self.file, self.setLabel.emit, self.bar1.emit, self.bar2.emit)
self.done.emit()
self.terminate()
envoyproxy/envoy | tools/api_proto_breaking_change_detector/detector.py | Python | apache-2.0 | 5,354 | 0.002615
""" Protocol Buffer Breaking Change Detector
This tool is used to detect "breaking changes" in protobuf files, to
ensure proper backwards-compatibility in protobuf API updates. The tool
can check for breaking changes of a single API by taking 2 .proto file
paths as input (before and after) and outputting a bool `is_breaking`.
The breaking change detector creates a temporary directory, copies in
each file to compute a protobuf "state", computes a diff of the "before"
and "after" states, and runs the diff against a set of rules to determine
if there was a breaking change.
The tool is currently implemented with buf (https://buf.build/)
"""
from pathlib import Path
from typing import List
from tools.api_proto_breaking_change_detector.buf_utils import check_breaking, pull_buf_deps
from tools.api_proto_breaking_change_detector.detector_errors import ChangeDetectorError
class ProtoBreakingChangeDetector(object):
"""Ab
|
stract breaking change detector interface"""
def run_detector(self) -> None:
"""Run the breaking change detector to detect rule violations
This method should populate the detector's internal data such
that `is_breaking` does not require any additional invocations
to the breaking change detector.
"""
pass
def is_breaking(self) -> bool:
"""Return True if breaking changes were detected in the given protos"""
pass
def get_breaking_changes(self) -> List[str]:
"""Return a list of strings containing breaking changes output by the tool"""
pass
class BufWrapper(ProtoBreakingChangeDetector):
"""Breaking change detector implemented with buf"""
def __init__(
self,
path_to_changed_dir: str,
git_ref: str,
git_path: str,
subdir: str = None,
buf_path: str = None,
config_file_loc: str = None,
additional_args: List[str] = None) -> None:
"""Initialize the configuration of buf
This function sets up any necessary config without actually
running buf against any proto files.
BufWrapper takes a path to a directory containing proto files
as input, and it checks if these proto files break any changes
from a given initial state.
The initial state is input as a git ref. The constructor expects
a git ref string, as well as an absolute path to a .git folder
for the repository.
Args:
path_to_changed_dir {str} -- absolute path to a directory containing proto files in the after state
buf_path {str} -- path to the buf binary (default: "buf")
git_ref {str} -- git reference to use for the initial state of the protos (typically a commit hash)
git_path {str} -- absolute path to .git folder for the repository of interest
subdir {str} -- subdirectory within git repository from which to search for .proto files (default: None, e.g. stay in root)
additional_args {List[str]} -- additional arguments passed into the buf binary invocations
config_file_loc {str} -- absolute path to buf.yaml configuration file (if not provided, uses default buf configuration)
"""
if not Path(path_to_changed_dir).is_dir():
raise ValueError(f"path_to_changed_dir {path_to_changed_dir} is not a valid directory")
if Path.cwd() not in Path(path_to_changed_dir).parents:
raise ValueError(
f"path_to_changed_dir {path_to_changed_dir} must be a subdirectory of the cwd ({ Path.cwd() })"
)
if not Path(git_path).exists():
raise ChangeDetectorError(f'path to .git folder {git_path} does not exist')
self._path_to_changed_dir = path_to_changed_dir
self._additional_args = additional_args
self._buf_path = buf_path or "buf"
self._config_file_loc = config_file_loc
self._git_ref = git_ref
self._git_path = git_path
self._subdir = subdir
self._final_result = None
pull_buf_deps(
self._buf_path,
self._path_to_changed_dir,
config_file_loc=self._config_file_loc,
additional_args=self._additional_args)
def run_detector(self) -> None:
self._final_result = check_breaking(
self._buf_path,
self._path_to_changed_dir,
git_ref=self._git_ref,
git_path=self._git_path,
subdir=self._subdir,
config_file_loc=self._config_file_loc,
additional_args=self._additional_args)
def is_breaking(self) -> bool:
if not self._final_result:
raise ChangeDetectorError("Must invoke run_detector() before checking if is_breaking()")
final_code, final_out, final_err = self._final_result
final_out, final_err = '\n'.join(final_out), '\n'.join(final_err)
if final_err != "":
raise ChangeDetectorError(f"Error from buf: {final_err}")
if final_code != 0:
return True
if final_out != "":
return True
return False
def get_breaking_changes(self) -> List[str]:
_, final_out, _ = self._final_result
return list(filter(lambda x: len(x) > 0, final_out)) if self.is_breaking() else []
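# A minimal usage sketch of BufWrapper based on the docstrings above. The proto
# directory, git ref and .git path below are hypothetical, and per the constructor
# checks the changed directory must live under the current working directory.
detector = BufWrapper(
    path_to_changed_dir=str(Path.cwd() / "api"),  # hypothetical "after" state protos
    git_ref="HEAD~1",                             # hypothetical "before" state
    git_path=str(Path.cwd() / ".git"),
)
detector.run_detector()
if detector.is_breaking():
    for change in detector.get_breaking_changes():
        print(change)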
egabancho/invenio | invenio/legacy/bibmatch/engine.py | Python | gpl-2.0 | 64,532 | 0.005067
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2014, 2015 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from __future__ import print_function
"""BibMatch - tool to match records with database content of an Invenio instance,
either locally or remotely through invenio_connector."""
__revision__ = "$Id$"
import sys
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set #for "&" intersection
# pylint: enable=W0622
import string
import os
import getopt
import re
import getpass
from six import iteritems
from tempfile import mkstemp
from time import sleep
from invenio.config import CFG_SITE_SECURE_URL, CFG_BIBMATCH_FUZZY_WORDLIMITS, \
CFG_BIBMATCH_QUERY_TEMPLATES, \
CFG_BIBMATCH_FUZZY_EMPTY_RESULT_LIMIT, \
CFG_BIBMATCH_LOCAL_SLEEPTIME, \
CFG_BIBMATCH_REMOTE_SLEEPTIME, \
CFG_SITE_RECORD, \
CFG_BIBMATCH_SEARCH_RESULT_MATCH_LIMIT
from invenio.legacy.bibmatch.config import CFG_BIBMATCH_LOGGER, \
CFG_LOGFILE
from invenio_client import InvenioConnector, \
InvenioConnectorAuthError
from invenio.legacy.bibrecord import create_records, \
record_get_field_values, record_xml_output, record_modify_controlfield, \
record_has_field, record_add_field
from invenio.legacy.bibconvert import api as bibconvert
from invenio.legacy.search_engine import get_fieldcodes, \
re_pattern_single_quotes, \
re_pattern_double_quotes, \
re_pattern_regexp_quotes, \
re_pattern_spaces_after_colon
from invenio.legacy.search_engine.query_parser import SearchQueryParenthesisedParser
from invenio.legacy.dbquery import run_sql
from invenio.legacy.bibrecord.textmarc2xmlmarc import transform_file
from invenio.legacy.bibmatch.validator import validate_matches, transform_record_to_marc, \
validate_tag, BibMatchValidationError
from invenio.utils.text import translate_to_ascii, xml_entities_to_utf8
try:
from six import StringIO
except ImportError:
from StringIO import StringIO
re_querystring = re.compile("\s?([^\s$]*)\[(.+?)\]([^\s$]*).*?", re.DOTALL)
def usage():
"""Print help"""
print(""" BibMatch - match bibliographic data against database, either locally or remotely
Usage: %s [options] [QUERY]
Options:
Output:
-0 --print-new (default) print unmatched in stdout
-1 --print-match print matched records in stdout
-2 --print-ambiguous print records that match more than 1 existing record
-3 --print-fuzzy print records that match the longest words in existing records
-b --batch-output=(filename). filename.new will be new records, filename.matched will be matched,
filename.ambiguous will be ambiguous, filename.fuzzy will be fuzzy match
-t --text-marc-output transform the output to text-marc format instead of the default MARCXML
Simple query:
-q --query-string=(search-query/predefined-query) See "Querystring"-section below.
-f --field=(field)
General options:
-n --noprocess Do not print records in stdout.
-i, --input use a named file instead of stdin for input
-v, --verbose=LEVEL verbose level (from 0 to 9, default 1)
-r, --remote=URL match against a remote Invenio installation (Full URL, no trailing '/')
Beware: Only searches public records attached to home collection
-a, --alter-recid The recid (controlfield 001) of matched or fuzzy matched records in
output will be replaced by the 001 value of the matched record.
Note: Useful if you want to replace matched records using BibUpload.
-z, --clean clean queries before searching
--no-validation do not perform post-match validation
-h, --help print this help and exit
-V, --version print version information and exit
Advanced options:
-m --mode=(a|e|o|p|r) perform an advanced search using special search mode.
Where mode is:
"a" all of the words,
"o" any of the words,
"e" exact phrase,
"p" partial phrase,
"r" regular expression.
-o --operator(a|o) used to concatenate identical fields in search query (i.e. several report-numbers)
Where operator is:
"a" boolean AND (default)
"o" boolean OR
-c --config=filename load querystrings from a config file. Each line starting with QRYSTR will
be added as a query. i.e. QRYSTR --- [title] [author]
-x --collection only perform queries in certain collection(s).
Note: matching against restricted collections requires authentication.
--user=USERNAME username to use when connecting to Invenio instance. Useful when searching
restricted collections. You will be prompted for password.
QUERYSTRINGS
Querystrings determine which type of query/strategy to use when searching for the
matching records in the database.
Predefined querystrings:
There are some predefined querystrings available:
title - standard title search. (i.e. "this is a title") (default)
title-author - title and author search (i.e. "this is a title AND Lastname, F")
reportnumber - reportnumber search (i.e. reportnumber:REP-NO-123).
You can also add your own predefined querystrings inside invenio.conf file.
You can structure your query in different ways:
* Old-style: fieldnames separated by '||' (conforms with earlier BibMatch versions):
-q "773__p||100__a"
* New-style: Invenio query syntax with "bracket syntax":
-q "773__p:\"[773__p]\" 100__a:[100__a]"
Depending on the structure of the query, it will fetch associated values from each record and put it into
the final search query. i.e in the above example it will put journal-title from 773__p.
When more than one value/datafield is found, i.e. when looking for 700__a (additional authors),
several queries will be put together to make sure all combinations of values are accounted for.
The queries are separated with given operator (-o, --operator) value.
Note: You can add more than one query to a search, just give more (-q, --query-string) arguments.
The results of all queries will be combined when matching.
BibConvert formats:
Another option to further improve your matching strategy is to use BibConvert formats. By using the formats
available by BibConvert you can change the values from the retrieved record-fields.
i.e. using WORDS(1,R) will only return the first (1) word from the right (R). This can be very useful when
adjusting your matching parameters to better match the content. For example only getting authors last-name
instead of full-name.
You can use these formats directly in the querystrings (indicated by '::'):
* Old-style: -q "100__a::WORDS(1,R)::DOWN()"
This query will take first word from the right from 100__a and also convert it to lower-case.
* New-style:
tangfeixiong/packstack | packstack/plugins/mysql_001.py | Python | apache-2.0 | 5,760 | 0.009028
"""
Installs and configures MySQL
"""
import uuid
import logging
from packstack.installer import validators
from packstack.installer import utils
from packstack.modules.ospluginutils import getManifestTemplate, appendManifestFile
# Controller object will be initialized from main flow
controller = None
# Plugin name
PLUGIN_NAME = "OS-MySQL"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
logging.debug("plugin %s loaded", __name__)
def initConfig(controllerObject):
global controller
controller = controllerObject
logging.debug("Adding MySQL OpenStack configuration")
paramsList = [
{"CMD_OPTION" : "mysql-host",
"USAGE" : "The IP address of the server on which to install MySQL",
"PROMPT" : "Enter the IP address of the MySQL server",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_ssh],
"DEFAULT_VALUE" : utils.get_localhost_ip(),
"MASK_INPUT" : False,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_MYSQL_HOST",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "mysql-user",
"USAGE" : "Username for the MySQL admin user",
"PROMPT" : "Enter the username for the MySQL admin user",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : "root",
"MASK_INPUT" : False,
"LOOSE_VALIDATION": False,
"CONF_NAME" : "CONFIG_MYSQL_USER",
"USE_DEFAULT" : True,
"NEED_CONFIRM" : False,
"CONDITION" : False },
{"CMD_OPTION" : "mysql-pw",
"USAGE" : "Password for the MySQL admin user",
"PROMPT" : "Enter the password for the MySQL admin user",
"OPTION_LIST" : [],
"VALIDATORS" : [validators.validate_not_empty],
"DEFAULT_VALUE" : uuid.uuid4().hex[:16],
"MASK_INPUT" : True,
"LOOSE_VALIDATION": True,
"CONF_NAME" : "CONFIG_MYSQL_PW",
"USE_DEFAULT" : False,
"NEED_CONFIRM" : True,
"CONDITION" : False },
]
groupDict = { "GROUP_NAME" : "MYSQL",
"DESCRIPTION" : "MySQL Config parameters",
"PRE_CONDITION" : lambda x: 'yes',
"PRE_CONDITION_MATCH" : "yes",
"POST_CONDITION" : False,
"POST_CONDITION_MATCH" : True}
controller.addGroup(groupDict, paramsList)
def initSequences(controller):
mysqlsteps = [
{'title': 'Adding MySQL manifest entries',
'functions':[createmanifest]}
]
controller.addSequence("Installing MySQL", [], [], mysqlsteps)
def createmanifest(config):
if config['CONFIG_MYSQL_INSTALL'] == 'y':
install = True
suffix = 'install'
else:
install = False
suffix = 'noinstall'
# In case we are not installing MySQL server, mysql* manifests have
# to be run from Keystone host
host = install and config['CONFIG_MYSQL_HOST'] \
or config['CONFIG_KEYSTONE_HOST']
manifestfile = "%s_mysql.pp" % host
manifestdata = [getManifestTemplate('mysql_%s.pp' % suffix)]
def append_for(module, suffix):
# Modules have to be appended to the existing mysql.pp
# otherwise pp will fail for some of them saying that
# Mysql::Config definition is missing.
template = "mysql_%s_%s.pp" % (module, suffix)
manifestdata.append(getManifestTemplate(template))
append_for("keystone", suffix)
hosts = set()
for mod in ['nova', 'cinder', 'glance', 'neutron', 'heat']:
if config['CONFIG_%s_INSTALL' % mod.upper()] == 'y':
append_for(mod, suffix)
# Check which modules are enabled so we can allow their
# hosts on the firewall
if mod != 'nova' and mod != 'neutron':
hosts.add(config.get('CONFIG_%s_HOST' % mod.upper()).strip())
elif mod == 'neutron':
hosts.add(config.get('CONFIG_NEUTRON_SERVER_HOST').strip())
elif config['CONFIG_NOVA_INSTALL'] != 'n':
# In the remote case that we have lots of nova hosts
hosts.add(config.get('CONFIG_NOVA_API_HOST').strip())
hosts.add(config.get('CONFIG_NOVA_CERT_HOST').strip())
hosts.add(config.get('CONFIG_NOVA_VNCPROXY_HOST').strip())
hosts.add(config.get('CONFIG_NOVA_CONDUCTOR_HOST').strip())
hosts.add(config.get('CONFIG_NOVA_SCHED_HOST').strip())
if config['CONFIG_NEUTRON_INSTALL'] != 'y':
dbhosts = split_hosts(config['CONFIG_NOVA_NETWORK_HOSTS'])
hosts |= dbhosts
for host in config.get('CONFIG_NOVA_COMPUTE_HOSTS').split(','):
hosts.add(host.strip())
config['FIREWALL_ALLOWED'] = ",".join(["'%s'" % i for i in hosts])
config['FIREWALL_SERVICE_NAME'] = "mysql"
config['FIREWALL_PORTS'] = "'3306'"
manifestdata.append(getManifestTemplate("firewall.pp"))
appendManifestFile(manifestfile, "\n".join(manifestdata), 'pre')
Debian/openjfx | modules/web/src/main/native/Tools/QueueStatusServer/handlers/updatestatus.py | Python | gpl-2.0 | 3,275 | 0.001527
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.api import users
from google.appengine.ext import webapp, db
from google.appengine.ext.webapp import template
from handlers.updatebase import UpdateBase
from loggers.recordbotevent import RecordBotEvent
from loggers.recordpatchevent import RecordPatchEvent
from model.attachment import Attachment
from model.queuestatus import QueueStatus
class UpdateStatus(UpdateBase):
def get(self):
self.response.out.write(template.render("templates/updatestatus.html", None))
def _queue_status_from_request(self):
queue_status = QueueStatus()
# FIXME: I think this can be removed, no one uses it.
if users.get_current_user():
queue_status.author = users.get_current_user()
bug_id = self._int_from_request("bug_id")
patch_id = self._int_from_request("patch_id")
queue_name = self.request.get("queue_name")
bot_id = self.request.get("bot_id")
queue_status.queue_name = queue_name
queue_status.bot_id = bot_id
queue_status.active_bug_id = bug_id
queue_status.active_patch_id = patch_id
queue_status.message = self.request.get("status")
results_file = self.request.get("results_file")
queue_status.results_file = db.Blob(str(results_file))
return queue_status
def post(self):
queue_status = self._queue_status_from_request()
queue_status.put()
RecordBotEvent.record_activity(queue_status.queue_name, queue_status.bot_id)
if queue_status.active_patch_id:
RecordPatchEvent.updated(queue_status.active_patch_id, queue_status.queue_name, queue_status.message, queue_status.bot_id)
self.response.out.write(queue_status.key().id())
Jumpscale/jumpscale_core8 | lib/JumpScale/data/params/Params.py | Python | apache-2.0 | 5,825 | 0.000687
from JumpScale import j
"""
Provides the Params object and the ParamsFactory that is used in the Q-Tree
"""
class ParamsFactory:
"""
This factory can create new Params objects
"""
def __init__(self):
self.__jslocation__ = "j.data.params"
def get(self, dictObject={}):
"""
Create and return a new Params object
@param dictObject when dict given then dict will be converted into params
@return: a new Params object
@rtype: Params
"""
return Params(dictObject)
def isParams(self, p):
"""
Return if the argument object is an instance of Params
@param p: object to check
@type p: object
@return: Whether or not `p` is a Params instance
@rtype: boolean
"""
return isinstance(p, Params)
class Params:
def __init__(self, dictObject=None):
if dictObject is not None:
self.__dict__ = dictObject
def merge(self, otherParams):
self.__dict__.update(otherParams.__dict__)
def get(self, key, defaultvalue=None):
return self.__dict__.get(key, defaultvalue)
def __contains__(self, key):
return key in self.__dict__
def __getitem__(self, key):
return self.__dict__[key]
def expandParamsAsDict(self, **kwargs):
"""
adds paramsExtra, tags & params from requestContext if it exists
return as dict
for each item given as named argument check it is already in dict and if not add
e.g. args=self.expandParamsAsDict(id=1,name="test")
will return a dict with id & name and these values unless if they were set in the params already
can further use it as follows:
params.result=infomgr.getInfoWithHeaders(**args)
full example:
#############
args=params.expandParamsAsDict(maxvalues=100,id=None,start="-3d",stop=None)
args["start"]=j.data.time.getEpochAgo(args["start"])
args["stop"]=j.data.time.getEpochFuture(args["stop"])
params.result=j.apps.system.infomgr.extensions.infomgr.addInfo(**args)
"""
params = self
params2 = params.getDict()
if "paramsExtra" in params and params.paramsExtra is not None:
params2.update(params.paramsExtra)
if "requestContext" in params and params.requestContext is not None:
params2.update(params.requestContext.params)
if "tags" in params and params2["tags"] != "":
params2.update(params2["tags"].getDict())
for item in ["requestContext", "tags", "paramsExtra"]:
if item in params:
params2.pop(item)
if len(kwargs) == 0:
return params2
result = {}
for key in list(kwargs.keys()):
if key in params2:
result[key] = params2[key]
return result
def expandParams(self, **kwargs):
"""
adds paramsExtra, tags & params from requestContext if it exists
returns params but not needed because params just get modified to have all these extra arguments/params as properties
set default as params to this method e.g.
expandParams(id=10,hight=100)
"""
def getArgs(d):
r = {}
reserved = ["name", "doc", "macro",
"macrostr", "cmdstr", "page", "tags"]
for key in list(d.keys()):
if key in reserved:
r["arg_%s" % key] = d[key]
else:
r[key] = d[key]
return r
if "paramsExtra" in self and self.paramsExtra is not None:
self.setDict(getArgs(self.paramsExtra))
# self.pop("paramsExtra")
if "requestContext" in self and self.requestContext is not None:
self.setDict(getArgs(self.requestContext.params))
# self.pop("requestContext")
if "tags" in self and self.tags != "":
self.setDict(getArgs(self.tags.getDict()))
# self.pop("tags")
for argname in list(kwargs.keys()):
if argname not in self.__dict__:
self.__dict__[argname] = kwargs[argname]
return self
def getTag(self, name, default=None):
tags = getattr(self, 'tags', None)
if not tags:
return default
tags = tags.getDict()
tag = tags.get(name)
if tag and j.data.text.toStr(tag).startswith('$$'):
return default
if not tag:
return default
return tag
def pop(self, key):
if key in self:
self.__dict__.pop(key)
def has_key(self, key):
return key in self.__dict__
def getDict(self):
return self.__dict__
def setDict(self, dictObject):
self.__dict__.update(dictObject)
def extend(self, params):
"""
Update this Params object with the contents of the argument Params
object
@param params: the Params or dict object to update from
@type params: dict or Params
@raise TypeError: if the argument is not a dict or Params object
"""
if isinstance(params, Params):
d = params.__dict__
elif isinstance(params, dict):
d = params
else:
raise TypeError("Argument params is of an unknown type %s" %
type(params))
self.__dict__.update(d)
# def __dir__(self):
# return sorted(dir(super(Params, self)) + self.__dict__.keys())
def __repr__(self):
parts = ["PARAMS:"]
for key, value in list(self.__dict__.items()):
parts.append(" %s:%s" % (key, value))
return "\n".join(parts)
def __str__(self):
return self.__repr__()
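# A minimal usage sketch of Params, assuming it is constructed directly rather than
# through the j.data.params factory registered above.
p = Params({"id": 1})
p.expandParams(name="test", height=100)  # only fills attributes that are not already set
print(p.get("name"))        # -> "test"
p.extend({"name": "override", "size": 5})
print(p.name, p.size)       # -> override 5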
gtesei/fast-furious | dataset/images2/simple_classification.py | Python | mit | 1,266 | 0.011848
import mahotas as mh
from sklearn import cross_validation
from sklearn.linear_model.logistic import LogisticRegression
import numpy as np
from glob import glob
from edginess import edginess_sobel
#basedir = 'simple-dataset'
basedir = 'simple-dataset/'
def features_for(im):
im = mh.imread(im,as_grey=True).astype(np.uint8)
return mh.features.haralick(im).mean(0)
features = []
sobels = []
labels = []
images = glob('{}/*.jpg'.format(basedir))
for im in images:
features.append(features_for(im))
sobels.append(edginess_sobel(mh.imread(im, as_grey=True)))
labels.append(im[:-len('00.jpg')])
features = np.array(features)
labels = np.array(labels)
n = features.shape;
nl = labels.shape;
print('features='+str(n))
print(str(features))
print ('labels='+str(nl))
print(str(labels))
scores = cross_validation.cross_val_score(LogisticRegression(), features, labels, cv=5)
print('Accuracy (5 fold x-val) with Logistic Regression [std features]: {}%'.format(0.1* round(1000*scores.mean())))
scores = cross_validation.cross_val_score(LogisticRegression(), np.hstack([np.atleast_2d(sobels).T,features]), labels, cv=5).mean()
print('Accuracy (5 fold x-val) with Logistic Regression [std features + sobel]: {}%'.format(0.1* round(1000*scores.mean())))
youtube/cobalt | third_party/devtools/scripts/build/build_debug_applications.py | Python | bsd-3-clause | 2,246 | 0.000894
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Builds applications in debug mode:
- Copies the module directories into their destinations.
- Copies app.html as-is.
"""
from os import path
from os.path import join
import os
import shutil
import sys
import modular_build
def main(argv):
try:
input_path_flag_index = argv.index('--input_path')
input_path = argv[input_path_flag_index + 1]
output_path_flag_index = argv.index('--output_path')
output_path = argv[output_path_flag_index + 1]
build_stamp_index = argv.index('--build_stamp')
build_stamp_path = argv[build_stamp_index + 1]
except:
print('Usage: %s app_1 app_2 ... app_N --input_path <input_path> --output_path <output_path>' % argv[0])
raise
symlink_dir_or_copy(input_path, output_path)
with open(build_stamp_path, 'w') as file:
file.write('stamp')
def symlink_dir_or_copy(src, dest):
if hasattr(os, 'symlink'):
if path.exists(dest):
if os.path.islink(dest):
os.unlink(dest)
else:
shutil.rmtree(dest)
os.symlink(join(os.getcwd(), src), dest)
else:
for filename in os.listdir(src):
new_src = join(os.getcwd(), src, filename)
if os.path.isdir(new_src):
copy_dir(new_src, join(dest, filename))
else:
copy_file(new_src, join(dest, filename), safe=True)
def copy_file(src, dest, safe=False):
if safe and path.exists(dest):
os.remove(dest)
shutil.copy(src, dest)
def copy_dir(src, dest):
if path.exists(dest):
shutil.rmtree(dest)
for src_dir, dirs, files in os.walk(src):
subpath = path.relpath(src_dir, src)
dest_dir = path.normpath(join(dest, subpath))
os.makedirs(dest_dir)
for name in files:
src_name = join(os.getcwd(), src_dir, name)
dest_name = join(dest_dir, name)
copy_file(src_name, dest_name)
if __name__ == '__main__':
sys.exit(main(sys.argv))
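# A hypothetical invocation sketch, mirroring the argv handling in main() above
# (application names first, then the three required flags; all paths are made up):
#   python build_debug_applications.py devtools_app \
#       --input_path front_end --output_path out/debug --build_stamp out/debug/build.stamp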
gasparmoranavarro/TopoDelProp | forms/frmIntrodDatos.py | Python | gpl-2.0 | 8,916 | 0.003141
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:/Qgs18/apps/qgis/python/plugins/TopoDelProp/forms_ui/frmIntrodDatos.ui'
#
# Created: Fri Nov 09 12:38:15 2012
# by: PyQt4 UI code generator 4.8.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_frmIntrodDatos(object):
def setupUi(self, frmIntrodDatos):
frmIntrodDatos.setObjectName(_fromUtf8("frmIntrodDatos"))
frmIntrodDatos.setWindowModality(QtCore.Qt.NonModal)
frmIntrodDatos.resize(975, 591)
frmIntrodDatos.setWindowTitle(QtGui.QApplication.translate("frmIntrodDatos", "TopoDelProp. Introducción de datos de: ", None, QtGui.QApplication.UnicodeUTF8))
frmIntrodDatos.setModal(False)
self.tableWidget = QtGui.QTableWidget(frmIntrodDatos)
self.tableWidget.setGeometry(QtCore.QRect(10, 40, 741, 371))
self.tableWidget.setToolTip(QtGui.QApplication.translate("frmIntrodDatos", "Pinche sobre el campo a rellenar", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.tableWidget.setAutoScroll(True)
self.tableWidget.setAutoScrollMargin(0)
self.tableWidget.setVerticalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.tableWidget.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerPixel)
self.tableWidget.setCornerButtonEnabled(True)
self.tableWidget.setRowCount(5)
self.tableWidget.setColumnCount(2)
self.tableWidget.setObjectName(_fromUtf8("tableWidget"))
self.tableWidget.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidget.horizontalHeader().setStretchLastSection(False)
self.tableWidget.verticalHeader().setVisible(True)
self.tableWidget.verticalHeader().setCascadingSectionResizes(False)
self.tableWidget.verticalHeader().setHighlightSections(True)
self.listWidget = QtGui.QListWidget(frmIntrodDatos)
        self.listWidget.setGeometry(QtCore.QRect(750, 70, 221, 341))
        self.listWidget.setToolTip(QtGui.QApplication.translate("frmIntrodDatos", "Seleccione el valor a introducir en la tabla", None, QtGui.QApplication.UnicodeUTF8))
self.listWidget.setStatusTip(_fromUtf8(""))
self.listWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.listWidget.setAutoScroll(True)
self.listWidget.setAlternatingRowColors(True)
self.listWidget.setHorizontalScrollMode(QtGui.QAbstractItemView.ScrollPerItem)
self.listWidget.setObjectName(_fromUtf8("listWidget"))
self.bttGuardar = QtGui.QPushButton(frmIntrodDatos)
self.bttGuardar.setGeometry(QtCore.QRect(730, 420, 131, 31))
self.bttGuardar.setToolTip(QtGui.QApplication.translate("frmIntrodDatos", "Guarda los cambios del elemento", None, QtGui.QApplication.UnicodeUTF8))
self.bttGuardar.setText(QtGui.QApplication.translate("frmIntrodDatos", "Guardar cambios", None, QtGui.QApplication.UnicodeUTF8))
self.bttGuardar.setObjectName(_fromUtf8("bttGuardar"))
self.bttTerminar = QtGui.QPushButton(frmIntrodDatos)
self.bttTerminar.setGeometry(QtCore.QRect(860, 420, 111, 31))
self.bttTerminar.setToolTip(QtGui.QApplication.translate("frmIntrodDatos", "Termina la introduccion de datos del elemento", None, QtGui.QApplication.UnicodeUTF8))
self.bttTerminar.setText(QtGui.QApplication.translate("frmIntrodDatos", "Terminar", None, QtGui.QApplication.UnicodeUTF8))
self.bttTerminar.setObjectName(_fromUtf8("bttTerminar"))
self.label = QtGui.QLabel(frmIntrodDatos)
self.label.setGeometry(QtCore.QRect(10, 10, 221, 16))
self.label.setText(QtGui.QApplication.translate("frmIntrodDatos", "Datos del elemento. Elija un campo:", None, QtGui.QApplication.UnicodeUTF8))
self.label.setObjectName(_fromUtf8("label"))
self.lbLista = QtGui.QLabel(frmIntrodDatos)
self.lbLista.setGeometry(QtCore.QRect(750, 20, 201, 16))
self.lbLista.setText(QtGui.QApplication.translate("frmIntrodDatos", "Valores del campo seleccionado:", None, QtGui.QApplication.UnicodeUTF8))
self.lbLista.setObjectName(_fromUtf8("lbLista"))
self.lbEstado = QtGui.QLabel(frmIntrodDatos)
self.lbEstado.setGeometry(QtCore.QRect(10, 490, 951, 101))
self.lbEstado.setText(_fromUtf8(""))
self.lbEstado.setObjectName(_fromUtf8("lbEstado"))
self.tbId_trabajo = QtGui.QLineEdit(frmIntrodDatos)
self.tbId_trabajo.setEnabled(False)
self.tbId_trabajo.setGeometry(QtCore.QRect(120, 420, 121, 22))
self.tbId_trabajo.setReadOnly(False)
self.tbId_trabajo.setObjectName(_fromUtf8("tbId_trabajo"))
self.tbSrc_trabajo = QtGui.QLineEdit(frmIntrodDatos)
self.tbSrc_trabajo.setEnabled(False)
self.tbSrc_trabajo.setGeometry(QtCore.QRect(120, 440, 121, 22))
self.tbSrc_trabajo.setReadOnly(False)
self.tbSrc_trabajo.setObjectName(_fromUtf8("tbSrc_trabajo"))
self.label_2 = QtGui.QLabel(frmIntrodDatos)
self.label_2.setGeometry(QtCore.QRect(20, 420, 81, 16))
self.label_2.setText(QtGui.QApplication.translate("frmIntrodDatos", "ID del trabajo:", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_3 = QtGui.QLabel(frmIntrodDatos)
self.label_3.setGeometry(QtCore.QRect(20, 440, 101, 16))
self.label_3.setText(QtGui.QApplication.translate("frmIntrodDatos", "SRC del trabajo:", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.bttEditar = QtGui.QPushButton(frmIntrodDatos)
self.bttEditar.setGeometry(QtCore.QRect(640, 420, 91, 31))
self.bttEditar.setText(QtGui.QApplication.translate("frmIntrodDatos", "Editar", None, QtGui.QApplication.UnicodeUTF8))
self.bttEditar.setObjectName(_fromUtf8("bttEditar"))
self.bttBuscar = QtGui.QPushButton(frmIntrodDatos)
self.bttBuscar.setGeometry(QtCore.QRect(490, 420, 71, 31))
self.bttBuscar.setText(QtGui.QApplication.translate("frmIntrodDatos", "Buscar", None, QtGui.QApplication.UnicodeUTF8))
self.bttBuscar.setObjectName(_fromUtf8("bttBuscar"))
self.bttDescargar = QtGui.QPushButton(frmIntrodDatos)
self.bttDescargar.setGeometry(QtCore.QRect(270, 420, 121, 31))
self.bttDescargar.setText(QtGui.QApplication.translate("frmIntrodDatos", "Descargar archivo", None, QtGui.QApplication.UnicodeUTF8))
self.bttDescargar.setObjectName(_fromUtf8("bttDescargar"))
self.bttNuevo = QtGui.QPushButton(frmIntrodDatos)
self.bttNuevo.setGeometry(QtCore.QRect(560, 420, 81, 31))
self.bttNuevo.setText(QtGui.QApplication.translate("frmIntrodDatos", "Nuevo", None, QtGui.QApplication.UnicodeUTF8))
self.bttNuevo.setObjectName(_fromUtf8("bttNuevo"))
self.txtFiltrar = QtGui.QLineEdit(frmIntrodDatos)
self.txtFiltrar.setGeometry(QtCore.QRect(750, 40, 221, 31))
self.txtFiltrar.setObjectName(_fromUtf8("txtFiltrar"))
self.bttBorrar = QtGui.QPushButton(frmIntrodDatos)
self.bttBorrar.setGeometry(QtCore.QRect(390, 420, 101, 31))
self.bttBorrar.setToolTip(QtGui.QApplication.translate("frmIntrodDatos", "Borra este registro", None, QtGui.QApplication.UnicodeUTF8))
self.bttBorrar.setText(QtGui.QApplication.translate("frmIntrodDatos", "Borrar ", None, QtGui.QApplication.UnicodeUTF8))
self.bttBorrar.setObjectName(_fromUtf8("bttBorrar"))
self.tbMunicipio = QtGui.QLineEdit(frmIntrodDatos)
self.tbMunicipio.setEnabled(False)
self.tbMunicipio.setGeometry(QtCore.QRect(120, 460, 381, 22))
self.tbMunicipio.setReadOnly(False)
        self.tbMunicipio.setObjectName(_fromUtf8("tbMunicipio"))
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_05_01/operations/_peer_express_route_circuit_connections_operations.py
|
Python
|
mit
| 9,496
| 0.004844
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PeerExpressRouteCircuitConnectionsOperations(object):
"""PeerExpressRouteCircuitConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.PeerExpressRouteCircuitConnection"
"""Gets the specified Peer Express Route Circuit Connection from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param connection_name: The name of the peer express route circuit connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PeerExpressRouteCircuitConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_05_01.models.PeerExpressRouteCircuitConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeerExpressRouteCircuitConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PeerExpressRouteCircuitConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/peerConnections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
circuit_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PeerExpressRouteCircuitConnectionListResult"]
"""Gets all global reach peer connections associated with a private peering in an express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PeerExpressRouteCircuitConnectionListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2021_05_01.models.PeerExpressRouteCircuitConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PeerExpressRouteCircuitConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
|
Elizaveta239/PyDev.Debugger
|
tests_python/resources/_debugger_case_breakpoint_remote_no_import.py
|
Python
|
epl-1.0
| 406
| 0.009852
|
if __name__ == '__main__':
import os
import sys
port = int(sys.argv[1])
root_dirname = os.path.dirname(os.path.dirname(__file__))
if root_dirname not in sys.path:
sys.path.append(root_dirname)
print('before pydevd.settrace')
    breakpoint(port=port) # Set up through custom sitecustomize.py
print('after pydevd.settrace')
print('TEST SUCEEDED!')
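    # Hedged sketch of the kind of sitecustomize.py the comment above refers to
    # (illustrative only, not the file used by this test suite): it rebinds the
    # PEP 553 hook so that breakpoint(port=...) attaches pydevd instead of pdb.
    #   import sys
    #   import pydevd
    #   def _breakpointhook(*args, **kwargs):
    #       pydevd.settrace(*args, **kwargs)
    #   sys.breakpointhook = _breakpointhook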
|
open-synergy/opnsynid-hr
|
hr_employee_job_family_from_contract/__openerp__.py
|
Python
|
agpl-3.0
| 676
| 0
|
# -*- coding: utf-8 -*-
# Copyright 2019 OpenSynergy Indonesia
# Copyright 2022 PT. Simetri Sinergi Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# pylint: disable=locally-disabled, manifest-required-author
{
"name": "Employee Job Family From Contract",
"version": "8.0.1.0.0",
"category": "Human Resource",
"website": "https://simetri
|
-sinergi.id",
"author": "OpenSynergy Indonesia, PT. Simetri Sinergi Indonesia",
"license": "AGPL-3",
"installable": True,
"depends": [
"hr_employee_data_from_contract",
"hr_job_family_modelling",
],
"data": [
"views/hr_contract_views.xml",
],
}
|
flipchan/LayerProx
|
versions/offthewire_version/marionette_tg/client.py
|
Python
|
apache-2.0
| 3,635
| 0.004402
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import random
sys.path.append('.')
from twisted.internet import reactor
from twisted.python import log
from . import driver
from . import multiplexer
from . import record_layer
from . import updater
from . import dsl
from . import conf
EVENT_LOOP_FREQUENCY_S = 0.01
AUTOUPDATE_DELAY = 5
class Client(object):
def __init__(self, format_name, format_version):
self.multiplexer_outgoing_ = multiplexer.BufferOutgoing()
self.multiplexer_incoming_ = multiplexer.BufferIncoming()
self.multiplexer_incoming_.addCallback(self.process_cell)
self.streams_ = {}
self.stream_counter_ = random.randint(1,2**32-1)
self.set_driver(format_name, format_version)
self.reload_ = False
        # first update must be delayed by AUTOUPDATE_DELAY seconds
reactor.callLater(AUTOUPDATE_DELAY, self.check_for_update)
def set_driver(self, format_name, format_version=None):
self.format_name_ = format_name
if format_version == None:
self.format_version_ = dsl.get_latest_version(
'client', format_name)
else:
self.format_version_ = format_version
self.driver_ = driver.ClientDriver("client")
self.driver_.set_multiplexer_incoming(self.multiplexer_incoming_)
self.driver_.set_multiplexer_outgoing(self.multiplexer_outgoing_)
self.driver_.setFormat(self.format_name_, self.format_version_)
def get_format(self):
retval = str(self.format_name_) + \
':' + \
str(self.format_version_)
return retval
def execute(self, reactor):
if self.driver_.isRunning():
self.driver_.execute(reactor)
else:
if self.reload_:
                self.set_driver(self.format_name_)
self.reload_ = False
|
self.driver_.reset()
reactor.callLater(EVENT_LOOP_FREQUENCY_S, self.execute, reactor)
def process_cell(self, cell_obj):
payload = cell_obj.get_payload()
if payload:
stream_id = cell_obj.get_stream_id()
try:
self.streams_[stream_id].srv_queue.put(payload)
except:
log.msg("Client.process_cell: Caught KeyError exception for stream_id :%d"
% (stream_id))
return
def start_new_stream(self, srv_queue=None):
stream = multiplexer.MarionetteStream(
self.multiplexer_incoming_,
self.multiplexer_outgoing_,
self.stream_counter_,
srv_queue)
stream.host = self
self.streams_[self.stream_counter_] = stream
self.stream_counter_ = random.randint(1,2**32-1)
return stream
def terminate(self, stream_id):
del self.streams_[stream_id]
# call this function if you want reload formats from disk
# at the next possible time
def reload_driver(self):
self.reload_ = True
def check_for_update(self):
# uncomment the following line to check for updates every N seconds
# instead of just on startup
# reactor.callLater(N, self.check_for_update, reactor)
if conf.get("general.autoupdate"):
self.do_update(self.reload_driver)
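    # A periodic variant, as the comment above suggests (the 60-second interval
    # is illustrative, not taken from the original code):
    #   def check_for_update(self):
    #       reactor.callLater(60, self.check_for_update)
    #       if conf.get("general.autoupdate"):
    #           self.do_update(self.reload_driver)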
def do_update(self, callback):
# could be replaced with code that updates from a different
# source (e.g., local computations)
update_server = conf.get("general.update_server")
        # Bind to a new name: reusing 'updater' would shadow the imported module
        # and raise UnboundLocalError before FormatUpdater could be resolved.
        format_updater = updater.FormatUpdater(update_server, use_marionette=True, callback=callback)
        return format_updater.do_update()
|
Communities-Communications/cc-odoo
|
addons/website_sale/models/sale_order.py
|
Python
|
agpl-3.0
| 10,347
| 0.007055
|
# -*- coding: utf-8 -*-
import random
from openerp import SUPERUSER_ID
from openerp.osv import osv, orm, fields
from openerp.addons.web.http import request
class sale_order(osv.Model):
_inherit = "sale.order"
def _cart_qty(self, cr, uid, ids, field_name, arg, context=None):
res = dict()
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = int(sum(l.product_uom_qty for l in (order.website_order_line or [])))
return res
_columns = {
'website_order_line': fields.one2many(
'sale.order.line', 'order_id',
string='Order Lines displayed on Website', readonly=True,
help='Order Lines to be displayed on the website. They should not be used for computation purpose.',
),
'cart_quantity': fields.function(_cart_qty, type='integer', string='Cart Quantity'),
'payment_acquirer_id': fields.many2one('payment.acquirer', 'Payment Acquirer', on_delete='set null', copy=False),
'payment_tx_id': fields.many2one('payment.transaction', 'Transaction', on_delete='set null', copy=False),
}
def _get_errors(self, cr, uid, order, context=None):
return []
def _get_website_data(self, cr, uid, order, context):
return {
'partner': order.partner_id.id,
'order': order
}
def _cart_find_product_line(self, cr, uid, ids, product_id=None, line_id=None, context=None, **kwargs):
for so in self.browse(cr, uid, ids, context=context):
domain = [('order_id', '=', so.id), ('product_id', '=', product_id)]
if line_id:
domain += [('id', '=', line_id)]
return self.pool.get('sale.order.line').search(cr, SUPERUSER_ID, domain, context=context)
def _website_product_id_change(self, cr, uid, ids, order_id, product_id, qty=0, line_id=None, context=None):
so = self.pool.get('sale.order').browse(cr, uid, order_id, context=context)
values = self.pool.get('sale.order.line').product_id_change(cr, SUPERUSER_ID, [],
pricelist=so.pricelist_id.id,
product=product_id,
partner_id=so.partner_id.id,
fiscal_position=so.fiscal_position.id,
qty=qty,
context=context
)['value']
if line_id:
line = self.pool.get('sale.order.line').browse(cr, SUPERUSER_ID, line_id, context=context)
values['name'] = line.name
else:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
values['name'] = "%s\n%s" % (product.display_name, product.description_sale)
values['product_id'] = product_id
        values['order_id'] = order_id
if values.get('tax_id') != None:
values['tax_id'] = [(6, 0, values['tax_id'])]
return values
def _cart_update(self, cr, uid, ids, product_id=None, line_id=None, add_qty=0, set_qty=0, context=None, **kwargs):
""" Add or set product quantity, add_qty can be negative """
sol = self.pool.get('sale.order.line')
quantity = 0
        for so in self.browse(cr, uid, ids, context=context):
if line_id != False:
line_ids = so._cart_find_product_line(product_id, line_id, context=context, **kwargs)
if line_ids:
line_id = line_ids[0]
# Create line if no line with product_id can be located
if not line_id:
values = self._website_product_id_change(cr, uid, ids, so.id, product_id, qty=1, context=context)
line_id = sol.create(cr, SUPERUSER_ID, values, context=context)
if add_qty:
add_qty -= 1
# compute new quantity
if set_qty:
quantity = set_qty
elif add_qty != None:
quantity = sol.browse(cr, SUPERUSER_ID, line_id, context=context).product_uom_qty + (add_qty or 0)
            # Remove zero or negative lines
if quantity <= 0:
sol.unlink(cr, SUPERUSER_ID, [line_id], context=context)
else:
# update line
values = self._website_product_id_change(cr, uid, ids, so.id, product_id, qty=quantity, line_id=line_id, context=context)
values['product_uom_qty'] = quantity
sol.write(cr, SUPERUSER_ID, [line_id], values, context=context)
return {'line_id': line_id, 'quantity': quantity}
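    # Illustrative call (ids and product id are hypothetical): adding one unit of
    # a product to an order's cart returns the affected line and new quantity,
    #   order_obj._cart_update(cr, uid, [order_id], product_id=42, add_qty=1)
    #   -> {'line_id': <sale.order.line id>, 'quantity': 1.0}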
def _cart_accessories(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
s = set(j.id for l in (order.website_order_line or []) for j in (l.product_id.accessory_product_ids or []))
s -= set(l.product_id.id for l in order.order_line)
product_ids = random.sample(s, min(len(s),3))
return self.pool['product.product'].browse(cr, uid, product_ids, context=context)
class website(orm.Model):
_inherit = 'website'
_columns = {
'pricelist_id': fields.related('user_id','partner_id','property_product_pricelist',
type='many2one', relation='product.pricelist', string='Default Pricelist'),
'currency_id': fields.related('pricelist_id','currency_id',
type='many2one', relation='res.currency', string='Default Currency'),
}
def sale_product_domain(self, cr, uid, ids, context=None):
return [("sale_ok", "=", True)]
def sale_get_order(self, cr, uid, ids, force_create=False, code=None, update_pricelist=None, context=None):
sale_order_obj = self.pool['sale.order']
sale_order_id = request.session.get('sale_order_id')
sale_order = None
# create so if needed
if not sale_order_id and (force_create or code):
# TODO cache partner_id session
partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id
for w in self.browse(cr, uid, ids):
values = {
'user_id': w.user_id.id,
'partner_id': partner.id,
'pricelist_id': partner.property_product_pricelist.id,
'section_id': self.pool.get('ir.model.data').get_object_reference(cr, uid, 'website', 'salesteam_website_sales')[1],
}
sale_order_id = sale_order_obj.create(cr, SUPERUSER_ID, values, context=context)
values = sale_order_obj.onchange_partner_id(cr, SUPERUSER_ID, [], partner.id, context=context)['value']
sale_order_obj.write(cr, SUPERUSER_ID, [sale_order_id], values, context=context)
request.session['sale_order_id'] = sale_order_id
if sale_order_id:
# TODO cache partner_id session
partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id
sale_order = sale_order_obj.browse(cr, SUPERUSER_ID, sale_order_id, context=context)
if not sale_order.exists():
request.session['sale_order_id'] = None
return None
# check for change of pricelist with a coupon
if code and code != sale_order.pricelist_id.code:
pricelist_ids = self.pool['product.pricelist'].search(cr, SUPERUSER_ID, [('code', '=', code)], context=context)
if pricelist_ids:
pricelist_id = pricelist_ids[0]
request.session['sale_order_code_pricelist_id'] = pricelist_id
update_pricelist = True
pricelist_id = request.session.get('sale_order_code_pricelist_id') or partner.property_product_pricelist.id
# check for change of partner_id ie after signup
if sale_order.partner_id.id != partner.id and request.website.partner_id.id != partner.id:
flag_pricelist = False
if pricelist_id != sale_order.pricelist_id.id:
flag_pricelist = True
fiscal_position = sale_order.fiscal_position and sale_order.fiscal_position.id or False
values = sal
|
ebattenberg/Lasagne
|
lasagne/tests/test_objectives.py
|
Python
|
mit
| 9,755
| 0
|
import mock
import numpy as np
import theano
import pytest
class TestObjectives:
@pytest.fixture
def input_layer(self, value):
from lasagne.layers import InputLayer
shape = np.array(value).shape
x = theano.shared(value)
return InputLayer(shape, input_var=x)
@pytest.fixture
def get_loss(self, loss_function, output, target, aggregation=None):
from lasagne.objectives import Objective
input_layer = self.input_layer(output)
obj = Objective(input_layer, loss_function)
return obj.get_loss(target=target, aggregation=aggregation)
@pytest.fixture
def get_masked_loss(self, loss_function, output, target, mask,
aggregation=None):
from lasagne.objectives import MaskedObjective
input_layer = self.input_layer(output)
obj = MaskedObjective(input_layer, loss_function)
return obj.get_loss(target=target, mask=mask,
aggregation=aggregation)
def test_mse(self):
from lasagne.objectives import mse
output = np.array([
[1.0, 0.0, 3.0, 0.0],
[-1.0, 0.0, -1.0, 0.0],
])
target = np.zeros((2, 4))
mask = np.array([[1.0], [0.0]])
mask_2d = np.array([[1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0]])
# Sqr-error sum = 1**2 + (-1)**2 + (-1)**2 + 3**2 = 12
# Mean is 1.5
result = self.get_loss(mse, output, target, aggregation='mean')
assert result.eval() == 1.5
result = self.get_loss(mse, output, target, aggregation='sum')
assert result.eval() == 12
# Masked error sum is 1**2 + 3**2
result_with_mask = self.get_masked_loss(mse, output, target,
mask, aggregation='sum')
assert result_with_mask.eval() == 10
result_with_mask = self.get_masked_loss(mse, output, target,
mask_2d, aggregation='sum')
assert result_with_mask.eval() == 10
result_with_mask = self.get_masked_loss(mse, output, target,
mask, aggregation='mean')
assert result_with_mask.eval() == 10/8.0
result_with_mask = self.get_masked_loss(mse, output, target,
mask_2d, aggregation='mean')
assert result_with_mask.eval() == 10/8.0
result_with_mask = self.get_masked_loss(mse, output, target,
mask, aggregation=None)
assert result_with_mask.eval() == 10/8.0
result_with_mask = self.get_masked_loss(mse, output, target,
mask_2d, aggregation=None)
assert result_with_mask.eval() == 10/8.0
result_with_mask = self.get_masked_loss(mse, output, target, mask,
aggregation='normalized_sum')
assert result_with_mask.eval() == 10
result_with_mask = self.get_masked_loss(mse, output, target, mask_2d,
aggregation='normalized_sum')
assert result_with_mask.eval() == 10/4.0
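        # The assertions above pin down the aggregation semantics: 'mean' divides
        # the masked squared-error sum (10) by the total number of elements (8),
        # while 'normalized_sum' divides by the sum of the mask (1 for the
        # per-row mask, 4 for the element-wise mask).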
def test_binary_crossentropy(self):
from lasagne.objectives import binary_crossentropy
output = np.array([
[np.e ** -2]*4,
[np.e ** -1]*4,
])
target = np.ones((2, 4))
mask = np.array([[0.0], [1.0]])
mask_2d = np.array([[0.0]*4,
[1.0]*4])
# Cross entropy sum is (2*4) + (1*4) = 12
# Mean is 1.5
result = self.get_loss(binary_crossentropy, output, target,
aggregation='mean')
assert result.eval() == 1.5
result = self.get_loss(binary_crossentropy, output, target,
aggregation='sum')
assert result.eval() == 12
# Masked cross entropy sum is 1*4*1 = 4
result_with_mask = self.get_masked_loss(binary_crossentropy,
output, target, mask,
aggregation='sum')
assert result_with_mask.eval() == 4
result_with_mask = self.get_masked_loss(binary_crossentropy,
output, target, mask_2d,
aggregation='sum')
assert result_with_mask.eval() == 4
result_with_mask = self.get_masked_loss(binary_crossentropy,
output, target, mask,
aggregation='mean')
assert result_with_mask.eval() == 1/2.0
result_with_mask = self.get_masked_loss(binary_crossentropy,
output, target, mask_2d,
aggregation='mean')
assert result_with_mask.eval() == 1/2.0
result_with_mask = self.get_masked_loss(binary_crossentropy,
output, target, mask,
aggregation='normalized_sum')
assert result_with_mask.eval() == 4
result_with_mask = self.get_masked_loss(binary_crossentropy,
output, target, mask_2d,
aggregation='normalized_sum')
assert result_with_mask.eval() == 1
def test_categorical_crossentropy(self):
from lasagne.objectives import categorical_crossentropy
output = np.array([
[1.0, 1.0-np.e**-1, np.e**-1],
[1.0-np.e**-2, np.e**-2, 1.0],
[1.0-np.e**-3, 1.0, np.e**-3]
])
target_1hot = np.array([2, 1, 2])
target_2d = np.array([
[0.0, 0.0, 1.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0],
])
mask_1hot = np.array([0, 1, 1])
# Multinomial NLL sum is 1 + 2 + 3 = 6
# Mean is 2
result = self.get_loss(categorical_crossentropy, output, target_1hot,
aggregation='mean')
assert result.eval() == 2
        result = self.get_loss(categorical_crossentropy, output, target_1hot,
                               aggregation='sum')
assert result.eval() == 6
# Multinomial NLL sum is (0*0 + 1*0 + 1*1) + (2*0 + 2*1 + 0*0)
# + (3*0 + 0*0 + 3*1) = 6
# Mean is 2
result = self.get_loss(categorical_crossentropy, output, target_2d,
aggregation='mean')
assert result.eval() == 2
        result = self.get_loss(categorical_crossentropy, output, target_2d,
                               aggregation='sum')
aggregation='sum')
assert result.eval() == 6
# Masked NLL sum is 2 + 3 = 5
result_with_mask = self.get_masked_loss(categorical_crossentropy,
output, target_1hot,
mask_1hot,
aggregation='sum')
assert result_with_mask.eval() == 5
# Masked NLL sum is 2 + 3 = 5
result_with_mask = self.get_masked_loss(categorical_crossentropy,
output, target_2d, mask_1hot,
aggregation='mean')
assert abs(result_with_mask.eval() - 5.0/3.0) < 1.0e-9
# Masked NLL sum is 2 + 3 = 5
result_with_mask = self.get_masked_loss(categorical_crossentropy,
output, target_2d, mask_1hot,
aggregation='normalized_sum')
assert result_with_mask.eval() == 5.0/2.0
def test_objective(self):
from lasagne.objectives import Objective
from lasagne.layers.input import Layer, InputLayer
input_layer = mock.Mock(InputLayer((None,)), output_shape=(None,))
layer = mock.Mock(Layer(input_layer), output_shape=(Non
|
recognai/spaCy
|
spacy/lang/ru/lemmatizer.py
|
Python
|
mit
| 6,860
| 0.000729
|
# coding: utf8
from ...symbols import (
ADJ, DET, NOUN, NUM, PRON, PROPN, PUNCT, VERB, POS
)
from ...lemmatizer import Lemmatizer
class RussianLemmatizer(Lemmatizer):
_morph = None
def __init__(self):
super(RussianLemmatizer, self).__init__()
try:
from pymorphy2 import MorphAnalyzer
except ImportError:
raise ImportError(
'The Russian lemmatizer requires the pymorphy2 library: '
'try to fix it with "pip install pymorphy2==0.8"')
if RussianLemmatizer._morph is None:
RussianLemmatizer._morph = MorphAnalyzer()
def __call__(self, string, univ_pos, morphology=None):
univ_pos = self.normalize_univ_pos(univ_pos)
if univ_pos == 'PUNCT':
return [PUNCT_RULES.get(string, string)]
if univ_pos not in ('ADJ', 'DET', 'NOUN', 'NUM', 'PRON', 'PROPN', 'VERB'):
# Skip unchangeable pos
return [string.lower()]
analyses = self._morph.parse(string)
filtered_analyses = []
for analysis in analyses:
if not analysis.is_known:
# Skip suggested parse variant for unknown word for pymorphy
continue
analysis_pos, _ = oc2ud(str(analysis.tag))
if analysis_pos == univ_pos \
or (analysis_pos in ('NOUN', 'PROPN') and univ_pos in ('NOUN', 'PROPN')):
filtered_analyses.append(analysis)
if not len(filtered_analyses):
return [string.lower()]
if morphology is None or (len(morphology) == 1 and POS in morphology):
return list(set([analysis.normal_form for analysis in filtered_analyses]))
if univ_pos in ('ADJ', 'DET', 'NOUN', 'PROPN'):
features_to_compare = ['Case', 'Number', 'Gender']
elif univ_pos == 'NUM':
features_to_compare = ['Case', 'Gender']
elif univ_pos == 'PRON':
features_to_compare = ['Case', 'Number', 'Gender', 'Person']
else: # VERB
features_to_compare = ['Aspect', 'Gender', 'Mood', 'Number', 'Tense', 'VerbForm', 'Voice']
analyses, filtered_analyses = filtered_analyses, []
for analysis in analyses:
_, analysis_morph = oc2ud(str(analysis.tag))
for feature in features_to_compare:
if (feature in morphology and feature in analysis_morph
and morphology[feature] != analysis_morph[feature]):
break
else:
filtered_analyses.append(analysis)
if not len(filtered_analyses):
return [string.lower()]
return list(set([analysis.normal_form for analysis in filtered_analyses]))
@staticmethod
def normalize_univ_pos(univ_pos):
if isinstance(univ_pos, str):
return univ_pos.upper()
symbols_to_str = {
ADJ: 'ADJ',
DET: 'DET',
NOUN: 'NOUN',
NUM: 'NUM',
PRON: 'PRON',
PROPN: 'PROPN',
PUNCT: 'PUNCT',
VERB: 'VERB'
}
if univ_pos in symbols_to_str:
return symbols_to_str[univ_pos]
return None
def is_base_form(self, univ_pos, morphology=None):
# TODO
raise NotImplementedError
def det(self, string, morphology=None):
return self(string, 'det', morphology)
def num(self, string, morphology=None):
return self(string, 'num', morphology)
def pron(self, string, morphology=None):
return self(string, 'pron', morphology)
def lookup(self, string):
analyses = self._morph.parse(string)
if len(analyses) == 1:
return analyses[0].normal_form
return string
def oc2ud(oc_tag):
gram_map = {
'_POS': {
'ADJF': 'ADJ',
'ADJS': 'ADJ',
'ADVB': 'ADV',
'Apro': 'DET',
        'COMP': 'ADJ', # Can also be an ADV - unchangeable
'CONJ': 'CCONJ', # Can also be a SCONJ - both unchangeable ones
'GRND': 'VERB',
'INFN': 'VERB',
'INTJ': 'INTJ',
'NOUN': 'NOUN',
'NPRO': 'PRON',
'NUMR': 'NUM',
'NUMB': 'NUM',
'PNCT': 'PUNCT',
'PRCL': 'PART',
'PREP': 'ADP',
'PRTF': 'VERB',
'PRTS': 'VERB',
'VERB': 'VERB',
},
'Animacy': {
'anim': 'Anim',
'inan': 'Inan',
},
'Aspect': {
'impf': 'Imp',
'perf': 'Perf',
},
'Case': {
'ablt': 'Ins',
'accs': 'Acc',
'datv': 'Dat',
'gen1': 'Gen',
'gen2': 'Gen',
'gent': 'Gen',
'loc2': 'Loc',
'loct': 'Loc',
'nomn': 'Nom',
'voct': 'Voc',
},
'Degree': {
'COMP': 'Cmp',
'Supr': 'Sup',
},
'Gender': {
'femn': 'Fem',
'masc': 'Masc',
'neut': 'Neut',
},
'Mood': {
'impr': 'Imp',
'indc': 'Ind',
},
'Number': {
'plur': 'Plur',
'sing': 'Sing',
},
'NumForm': {
'NUMB': 'Digit',
},
'Person': {
'1per': '1',
'2per': '2',
'3per': '3',
'excl': '2',
'incl': '1',
},
'Tense': {
'futr': 'Fut',
'past': 'Past',
'pres': 'Pres',
},
'Variant': {
'ADJS': 'Brev',
'PRTS': 'Brev',
},
'VerbForm': {
'GRND': 'Conv',
'INFN': 'Inf',
'PRTF': 'Part',
'PRTS': 'Part',
'VERB': 'Fin',
},
'Voice': {
'actv': 'Act',
'pssv': 'Pass',
},
'Abbr': {
'Abbr': 'Yes'
}
}
pos = 'X'
morphology = dict()
unmatched = set()
grams = oc_tag.replace(' ', ',').split(',')
for gram in grams:
match = False
for categ, gmap in sorted(gram_map.items()):
if gram in gmap:
match = True
if categ == '_POS':
pos = gmap[gram]
else:
morphology[categ] = gmap[gram]
if not match:
unmatched.add(gram)
while len(unmatched) > 0:
gram = unmatched.pop()
if gram in ('Name', 'Patr', 'Surn', 'Geox', 'Orgn'):
pos = 'PROPN'
elif gram == 'Auxt':
pos = 'AUX'
elif gram == 'Pltm':
morphology['Number'] = 'Ptan'
return pos, morphology
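# Illustrative mapping (the tag string is a typical pymorphy2/OpenCorpora tag,
# chosen for the example rather than taken from this file):
#   oc2ud('NOUN,anim,masc sing,nomn')
#   -> ('NOUN', {'Animacy': 'Anim', 'Gender': 'Masc', 'Number': 'Sing', 'Case': 'Nom'})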
PUNCT_RULES = {
"«": "\"",
"»": "\""
}
|
jtakayama/makahiki-draft
|
makahiki/apps/widgets/smartgrid_design/migrations/0001_initial.py
|
Python
|
mit
| 15,933
| 0.007218
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DesignerTextPromptQuestion'
db.create_table('smartgrid_design_designertextpromptquestion', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('action', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['smartgrid_design.DesignerAction'])),
('question', self.gf('django.db.models.fields.TextField')()),
('answer', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
))
db.send_create_signal('smartgrid_design', ['DesignerTextPromptQuestion'])
# Adding model 'DesignerQuestionChoice'
db.create_table('smartgrid_design_designerquestionchoice', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['smartgrid_design.DesignerTextPromptQuestion'])),
('action', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['smartgrid_design.DesignerAction'])),
('choice', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('smartgrid_design', ['DesignerQuestionChoice'])
# Adding model 'DesignerLevel'
db.create_table('smartgrid_design_designerlevel', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, null=True, db_index=True)),
('priority', self.gf('django.db.models.fields.IntegerField')(default=1)),
('unlock_condition', self.gf('django.db.models.fields.CharField')(max_length=400, null=True, blank=True)),
('unlock_condition_text', self.gf('django.db.models.fields.CharField')(max_length=400, null=True, blank=True)),
))
db.send_create_signal('smartgrid_design', ['DesignerLevel'])
# Adding model 'DesignerCategory'
db.create_table('smartgrid_design_designercategory', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, null=True, db_index=True)),
('priority', self.gf('django.db.models.fields.IntegerField')(default=1)),
))
db.send_create_signal('smartgrid_design', ['DesignerCategory'])
# Adding model 'DesignerAction'
db.create_table('smartgrid_design_designeraction', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50, db_index=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=255, null=True, blank=True)),
('video_id', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
            ('video_source', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
('embedded_widget', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')()),
('type', self.gf('django.db.models.fields.CharField')(max_length=20)),
('level', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['smartgrid_design.DesignerLevel'], null=True, blank=True)),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['smartgrid_design.DesignerCategory'], null=True, blank=True)),
('priority', self.gf('django.db.models.fields.IntegerField')(default=1000)),
('pub_date', self.gf('django.db.models.fields.DateField')(default=datetime.date(2013, 3, 19))),
('expire_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('unlock_condition', self.gf('django.db.models.fields.CharField')(max_length=400, null=True, blank=True)),
('unlock_condition_text', self.gf('django.db.models.fields.CharField')(max_length=400, null=True, blank=True)),
('related_resource', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
('social_bonus', self.gf('django.db.models.fields.IntegerField')(default=0)),
('point_value', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('smartgrid_design', ['DesignerAction'])
# Adding model 'Activity'
db.create_table('smartgrid_design_activity', (
('designeraction_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['smartgrid_design.DesignerAction'], unique=True, primary_key=True)),
('expected_duration', self.gf('django.db.models.fields.IntegerField')()),
('point_range_start', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('point_range_end', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('confirm_type', self.gf('django.db.models.fields.CharField')(default='text', max_length=20)),
('confirm_prompt', self.gf('django.db.models.fields.TextField')(blank=True)),
('admin_note', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('smartgrid_design', ['Activity'])
# Adding model 'Commitment'
db.create_table('smartgrid_design_commitment', (
('designeraction_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['smartgrid_design.DesignerAction'], unique=True, primary_key=True)),
('commitment_length', self.gf('django.db.models.fields.IntegerField')(default=5)),
))
db.send_create_signal('smartgrid_design', ['Commitment'])
# Adding model 'Event'
db.create_table('smartgrid_design_event', (
('designeraction_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['smartgrid_design.DesignerAction'], unique=True, primary_key=True)),
('expected_duration', self.gf('django.db.models.fields.IntegerField')()),
('event_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('event_location', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('event_max_seat', self.gf('django.db.models.fields.IntegerField')(default=1000)),
))
db.send_create_signal('smartgrid_design', ['Event'])
# Adding model 'Filler'
db.create_table('smartgrid_design_filler', (
('designeraction_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['smartgrid_design.DesignerAction'], unique=True, primary_key=True)),
))
db.send_create_signal('smartgrid_design', ['Filler'])
def backwards(self, orm):
# Deleting model 'DesignerTextPromptQuestion'
db.delete_table('smartgrid_design_designertextpromptquestion')
# Deleting model 'DesignerQuestionChoice'
db.delete_table('smartgrid_design_designerquestionchoice')
# Deleting model 'DesignerLevel'
db.delete_table('smartgrid_design_designerlevel')
# Deleting model 'DesignerCategory'
db.delete_table('smartgrid_design_designercategory')
# Deleting model 'DesignerAction'
db.delete_table('smartgrid_design_designeraction')
# Deleting model 'Activity'
db.delete_table('smartgrid_design_activity')
# Deleting mo
|
florian-dacosta/stock-logistics-warehouse
|
stock_reserve/__openerp__.py
|
Python
|
agpl-3.0
| 2,185
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Stock Reservation',
'summary': 'Stock reservations on products',
'version': '0.2',
'author': "Camptocamp,Odoo Community Association (OCA)",
'category': 'Warehouse',
'license': 'AGPL-3',
'complexity': 'normal',
'images': [],
'website': "http://www.camptocamp.com",
'description': """
Stock Reservation
=================
Allows to create stock reservations on products.
Each reservation can have a validity date, once passed, the reservation
is automatically lifted.
The reserved products are subtracted from the virtual stock. It means
that if you reserved a quantity of products which brings the virtual
stock below the minimum, the orderpoint will be triggered and new
purchase orders will be generated. It also implies that the max may be
exceeded if the reservations are canceled.
Contributors
------------
* Guewen Baconnier <guewen.baconnier@camptocamp.com>
* Yannick Vaucher <yannick.vaucher@camptocamp.com>
""",
'depends': ['stock',
],
'demo': [],
'data': ['view/stock_reserve.xml',
'view/product.xml',
'data/stock_data.xml',
'security/ir.model.access.csv',
],
'auto_install': False,
'test': ['test/stock_reserve.yml',
],
'installable': True,
}
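# Worked example of the behaviour described in the module description above
# (numbers are illustrative): with 10 units on hand and a reservation of 4, the
# virtual stock drops to 6; if the orderpoint minimum is 8, a new purchase order
# is triggered even though physical stock never moved, and cancelling the
# reservation afterwards can leave the stock above the configured maximum.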
|
novafloss/django-compose-settings
|
tests/fixtures/my_app/settings/post.py
|
Python
|
mit
| 161
| 0
|
import __settings__
from __settings__ import INSTALLED_APPS
assert hasattr(__settings__, 'BASE_DIR'), 'BASE_DIR required'
|
INSTALLED_APPS += (
'post',
)
|
Toilal/rebulk
|
rebulk/test/test_validators.py
|
Python
|
mit
| 2,170
| 0.00553
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name,len-as-condition
from functools import partial
from rebulk.pattern import StringPattern
from ..validators import chars_before, chars_after, chars_surround, validators
chars = ' _.'
left = partial(chars_before, chars)
right = partial(chars_after, chars)
surrounding = partial(chars_surround, chars)
def test_left_chars():
matches = list(StringPattern("word", validator=left).matches("xxxwordxxx"))
assert len(matches) == 0
matches = list(StringPattern("word", validator=left).matches("xxx_wordxxx"))
assert len(matches) == 1
matches = list(StringPattern("word", validator=left).matches("wordxxx"))
assert len(matches) == 1
def test_right_chars():
    matches = list(StringPattern("word", validator=right).matches("xxxwordxxx"))
assert len(matches) == 0
matches = list(StringPattern("word", validator=right).matches("xxxword.xxx"))
assert len(matches) == 1
matches = list(StringPattern("word", validator=right).matches("xxxword"))
    assert len(matches) == 1
def test_surrounding_chars():
matches = list(StringPattern("word", validator=surrounding).matches("xxxword xxx"))
assert len(matches) == 0
matches = list(StringPattern("word", validator=surrounding).matches("xxx.wordxxx"))
assert len(matches) == 0
matches = list(StringPattern("word", validator=surrounding).matches("xxx word_xxx"))
assert len(matches) == 1
matches = list(StringPattern("word", validator=surrounding).matches("word"))
assert len(matches) == 1
def test_chain():
matches = list(StringPattern("word", validator=validators(left, right)).matches("xxxword xxx"))
assert len(matches) == 0
matches = list(StringPattern("word", validator=validators(left, right)).matches("xxx.wordxxx"))
assert len(matches) == 0
matches = list(StringPattern("word", validator=validators(left, right)).matches("xxx word_xxx"))
assert len(matches) == 1
matches = list(StringPattern("word", validator=validators(left, right)).matches("word"))
assert len(matches) == 1
|
wengzhilai/family
|
iSoft/dal/QueryDal.py
|
Python
|
bsd-3-clause
| 3,612
| 0.000573
|
from iSoft.entity.model import db, FaQuery
import math
import json
from iSoft.model.AppReturnDTO import AppReturnDTO
from iSoft.core.Fun import Fun
import re
class QueryDal(FaQuery):
def __init__(self):
pass
def query_findall(self, pageIndex, pageSize, criterion, where):
relist, is_succ = Fun.model_findall(FaQuery, pageIndex, pageSize,
criterion, where)
return relist, is_succ
def query_Save(self, in_dict, saveKeys):
jsonStr = re.sub(r'\r|\n| ', "", in_dict["QUERY_CFG_JSON"])
jsonStr = re.sub(r'"onComponentInitFunction"((.|\n)+?)},', "", jsonStr)
jsonStr = re.sub(r',},', ",", jsonStr)
try:
x = json.loads(jsonStr)
except :
return None, AppReturnDTO(False, "列配置信息有误")
relist, is_succ = Fun.model_save(FaQuery, self, in_dict, saveKeys)
return relist, is_succ
def query_delete(self, key):
is_succ = Fun.model_delete(FaQuery, key)
return is_succ
def query_single(self, key):
relist, is_succ = Fun.model_single(FaQuery, key)
return relist, is_succ
def query_singleByCode(self, code):
db_ent = FaQuery.query.filter(FaQuery.CODE == code).first()
if db_ent is None:
return db_ent, AppReturnDTO(False, "代码不存在")
return db_ent, AppReturnDTO(True)
    # View data
def query_queryByCode(self, code, pageIndex, pageSize, criterion, where):
sql, cfg, msg = self.query_GetSqlByCode(code, criterion, where)
if not msg.IsSuccess:
return sql, msg
relist = db.session.execute(sql)
num = relist.rowcount
relist.close()
        if pageIndex < 1:
            pageIndex = 1
if pageSize < 1:
pageSize = 10
        # maximum page number
        max_page = math.ceil(num / pageSize)  # round up
if pageIndex > max_page:
return None, AppReturnDTO(True, num)
pageSql = "{0} LIMIT {1},{2}".format(sql, (pageIndex - 1) * pageSize,
pageSize)
allData, msg = Fun.sql_to_dict(pageSql)
if msg.IsSuccess:
msg.Msg = num
# relist = relist.paginate(pageIndex, per_page=pageSize).items
return allData, msg
def query_GetSqlByCode(self, code, criterion, where):
"""
根据查询代码运算出查询的SQL
用于导出数据,并统一管理配置的SQL
返回SQL和配置
"""
db_ent = FaQuery.query.filter(FaQuery.CODE == code).first()
if db_ent is None:
return "", "", AppReturnDTO(False, "代码不存在")
sql = db_ent.QUERY_CONF
orderArr = []
for order in criterion:
orderArr.append("T.%(Key)s %(Value)s" % order)
whereArr = []
for search in where:
if search["Type"] == "like":
whereArr.append("T.%(Key)s like ('%%%(Value)s%%')" % search)
else:
whereArr.append("T.%(Key)s %(Type)s
|
%(Value)s " % search)
sql = "SELECT * FROM ({0}) T{1}{2}".format(
sql,
" WHERE " + " AND ".join(whereArr) if len(whereArr) > 0 else "",
" ORDER BY " + " , ".join(orderArr) if len(orderArr) > 0 else "",
)
jsonStr = re.sub(r'\r|\n| ', "", db_ent.QUERY_CFG_JSON)
jsonStr = re.sub(r'"onComponentInitFunction"((.|\n)+?)},', "", jsonStr)
jsonStr = re.sub(r',},', ",", jsonStr)
return sql, json.loads(jsonStr), AppReturnDTO(True)
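    # Illustrative result of query_GetSqlByCode (the inputs are hypothetical):
    #   criterion = [{"Key": "ID", "Value": "DESC"}]
    #   where = [{"Key": "NAME", "Type": "like", "Value": "tom"}]
    # wraps the configured SQL as
    #   SELECT * FROM (<QUERY_CONF>) T WHERE T.NAME like ('%tom%') ORDER BY T.ID DESC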
|
PredictionIO/open-academy
|
KairatAshim/pio_assignment2/problem2/problem2.py
|
Python
|
apache-2.0
| 1,046
| 0.013384
|
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
import math
import scipy.special as sps
mean = 0
variance = 1
sigma = math.sqrt(variance)
def drawSampleNormal(sampleSize):
    samples = np.random.normal(mean, sigma, sampleSize)
count, bins, ignored = plt.hist(samples, 80, normed=True)
plt.plot(bins,mlab.normpdf(bins,mean,sigma))
plt.show()
plt.savefig("normal_" + str(sampleSize) + "_samples.png")
plt.clf()
drawSampleNormal(20)
drawSampleNormal(50)
drawSampleNormal(100)
drawSampleNormal(500)
alpha = 7.5
beta = 10
def drawSampleGamma(sampleSize):
samples = np.random.gamma(alpha, beta, sampleSize)
count, bins, ignored = plt.hist(samples, 80, normed=True)
pdf = bins**(alpha-1)*(np.exp(-bins/beta) / (sps.gamma(alpha)*beta**alpha))
plt.plot(bins, pdf, linewidth=2, color='r')
plt.show()
plt.savefig("gamma_" + str(sampleSize) + "_samples.png")
plt.clf()
drawSampleGamma(20)
drawSampleGamma(50)
drawSampleGamma(100)
drawSampleGamma(500)
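# The overlay in drawSampleGamma plots the shape/scale parameterisation of the
# gamma density, f(x) = x**(alpha-1) * exp(-x/beta) / (Gamma(alpha) * beta**alpha),
# which matches np.random.gamma(shape=alpha, scale=beta).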
|
marinkaz/orange3
|
Orange/widgets/tests/test_settings_handler.py
|
Python
|
bsd-2-clause
| 7,342
| 0
|
from io import BytesIO
import os
import pickle
from tempfile import mkstemp
import unittest
from unittest.mock import patch, Mock
import warnings
from Orange.widgets.settings import SettingsHandler, Setting, SettingProvider
class SettingHandlerTestCase(unittest.TestCase):
@patch('Orange.widgets.settings.SettingProvider', create=True)
def test_create(self, SettingProvider):
""":type SettingProvider: unittest.mock.Mock"""
with patch.object(SettingsHandler, 'read_defaults'):
handler = SettingsHandler.create(SimpleWidget)
self.assertEqual(handler.widget_class, SimpleWidget)
            # create needs to create a SettingProvider which traverses
            # the widget definition and collects all settings, and read
            # the default settings for the widget class
SettingProvider.assert_called_once_with(SimpleWidget)
SettingsHandler.read_defaults.assert_called_once_with()
def test_create_uses_template_if_provided(self):
template = SettingsHandler()
template.read_defaults = lambda: None
template.a = 'a'
template.b = 'b'
handler = SettingsHandler.create(SimpleWidget, template)
self.assertEqual(handler.a, 'a')
self.assertEqual(handler.b, 'b')
# create should copy the template
handler.b = 'B'
self.assertEqual(template.b, 'b')
def test_read_defaults(self):
default_settings = {'a': 5, 'b': {1: 5}}
fd, settings_file = mkstemp(suffix='.ini')
with open(settings_file, 'wb') as f:
pickle.dump(default_settings, f)
os.close(fd)
handler = SettingsHandler()
handler._get_settings_filename = lambda: settings_file
handler.read_defaults()
self.assertEqual(handler.defaults, default_settings)
os.remove(settings_file)
def test_write_defaults(self):
fd, settings_file = mkstemp(suffix='.ini')
handler = SettingsHandler()
handler.defaults = {'a': 5, 'b': {1: 5}}
handler._get_settings_filename = lambda: settings_file
handler.write_defaults()
with open(settings_file, 'rb') as f:
default_settings = pickle.load(f)
os.close(fd)
self.assertEqual(handler.defaults, default_settings)
os.remove(settings_file)
def test_initialize_widget(self):
handler = SettingsHandler()
handler.defaults = {'default': 42, 'setting': 1}
handler.provider = provider = Mock()
provider.get_provider.return_value = provider
widget = SimpleWidget()
def reset_provider():
provider.get_provider.return_value = None
provider.reset_mock()
provider.get_provider.return_value = provider
# No data
handler.initialize(widget)
provider.initialize.assert_called_once_with(widget, {'default': 42,
'setting': 1})
# Dictionary data
reset_provider()
        handler.initialize(widget, {'setting': 5})
provider.initialize.assert_called_once_with(widget, {'default': 42,
'setting': 5})
# Pickled data
reset_provider()
        handler.initialize(widget, pickle.dumps({'setting': 5}))
provider.initialize.assert_called_once_with(widget, {'default': 42,
'setting': 5})
def test_initialize_component(self):
handler = SettingsHandler()
handler.defaults = {'default': 42}
provider = Mock()
handler.provider = Mock(get_provider=Mock(return_value=provider))
widget = SimpleWidget()
# No data
handler.initialize(widget)
provider.initialize.assert_called_once_with(widget, None)
# Dictionary data
provider.reset_mock()
handler.initialize(widget, {'setting': 5})
provider.initialize.assert_called_once_with(widget, {'setting': 5})
# Pickled data
provider.reset_mock()
handler.initialize(widget, pickle.dumps({'setting': 5}))
provider.initialize.assert_called_once_with(widget, {'setting': 5})
@patch('Orange.widgets.settings.SettingProvider', create=True)
def test_initialize_with_no_provider(self, SettingProvider):
""":type SettingProvider: unittest.mock.Mock"""
handler = SettingsHandler()
handler.provider = Mock(get_provider=Mock(return_value=None))
provider = Mock()
SettingProvider.return_value = provider
widget = SimpleWidget()
# initializing an undeclared provider should display a warning
with warnings.catch_warnings(record=True) as w:
handler.initialize(widget)
self.assertEqual(1, len(w))
SettingProvider.assert_called_once_with(SimpleWidget)
provider.initialize.assert_called_once_with(widget, None)
def test_fast_save(self):
handler = SettingsHandler()
handler.read_defaults = lambda: None
handler.bind(SimpleWidget)
widget = SimpleWidget()
handler.fast_save(widget, 'component.int_setting', 5)
self.assertEqual(
handler.known_settings['component.int_setting'].default, 5)
self.assertEqual(Component.int_setting.default, 42)
handler.fast_save(widget, 'non_setting', 4)
def test_fast_save_siblings_spill(self):
handler_mk1 = SettingsHandler()
handler_mk1.read_defaults = lambda: None
handler_mk1.bind(SimpleWidgetMk1)
widget_mk1 = SimpleWidgetMk1()
handler_mk1.fast_save(widget_mk1, "setting", -1)
handler_mk1.fast_save(widget_mk1, "component.int_setting", 1)
self.assertEqual(
handler_mk1.known_settings['setting'].default, -1)
self.assertEqual(
handler_mk1.known_settings['component.int_setting'].default, 1)
handler_mk1.initialize(widget_mk1, data=None)
handler_mk1.provider.providers["component"].initialize(
widget_mk1.component, data=None)
self.assertEqual(widget_mk1.setting, -1)
self.assertEqual(widget_mk1.component.int_setting, 1)
handler_mk2 = SettingsHandler()
handler_mk2.read_defaults = lambda: None
handler_mk2.bind(SimpleWidgetMk2)
widget_mk2 = SimpleWidgetMk2()
handler_mk2.initialize(widget_mk2, data=None)
handler_mk2.provider.providers["component"].initialize(
widget_mk2.component, data=None)
self.assertEqual(widget_mk2.setting, 42,
"spills defaults into sibling classes")
self.assertEqual(Component.int_setting.default, 42)
self.assertEqual(widget_mk2.component.int_setting, 42,
"spills defaults into sibling classes")
class Component:
int_setting = Setting(42)
class SimpleWidget:
setting = Setting(42)
non_setting = 5
component = SettingProvider(Component)
def __init__(self):
self.component = Component()
class SimpleWidgetMk1(SimpleWidget):
pass
class SimpleWidgetMk2(SimpleWidget):
pass
class WidgetWithNoProviderDeclared:
def __init__(self):
self.undeclared_component = Component()
|
campagnola/acq4
|
acq4/pyqtgraph/exporters/SVGExporter.py
|
Python
|
mit
| 17,330
| 0.011541
|
from .Exporter import Exporter
from ..python2_3 import asUnicode
from ..parametertree import Parameter
from ..Qt import QtGui, QtCore, QtSvg, QT_LIB
from .. import debug
from .. import functions as fn
import re
import xml.dom.minidom as xml
import numpy as np
__all__ = ['SVGExporter']
class SVGExporter(Exporter):
Name = "Scalable Vector Graphics (SVG)"
allowCopy=True
def __init__(self, item):
Exporter.__init__(self, item)
#tr = self.getTargetRect()
self.params = Parameter(name='params', type='group', children=[
#{'name': 'width', 'type': 'float', 'value': tr.width(), 'limits': (0, None)},
|
#{'name': 'height', 'type': 'float', 'value': tr.height(), 'limits': (0, None)},
|
#{'name': 'viewbox clipping', 'type': 'bool', 'value': True},
#{'name': 'normalize coordinates', 'type': 'bool', 'value': True},
{'name': 'scaling stroke', 'type': 'bool', 'value': False, 'tip': "If False, strokes are non-scaling, "
"which means that they appear the same width on screen regardless of how they are scaled or how the view is zoomed."},
])
#self.params.param('width').sigValueChanged.connect(self.widthChanged)
#self.params.param('height').sigValueChanged.connect(self.heightChanged)
def widthChanged(self):
sr = self.getSourceRect()
ar = sr.height() / sr.width()
self.params.param('height').setValue(self.params['width'] * ar, blockSignal=self.heightChanged)
def heightChanged(self):
sr = self.getSourceRect()
ar = sr.width() / sr.height()
self.params.param('width').setValue(self.params['height'] * ar, blockSignal=self.widthChanged)
def parameters(self):
return self.params
def export(self, fileName=None, toBytes=False, copy=False):
if toBytes is False and copy is False and fileName is None:
self.fileSaveDialog(filter="Scalable Vector Graphics (*.svg)")
return
## Qt's SVG generator is not complete. (notably, it lacks clipping)
## Instead, we will use Qt to generate SVG for each item independently,
## then manually reconstruct the entire document.
options = {ch.name():ch.value() for ch in self.params.children()}
xml = generateSvg(self.item, options)
if toBytes:
return xml.encode('UTF-8')
elif copy:
md = QtCore.QMimeData()
md.setData('image/svg+xml', QtCore.QByteArray(xml.encode('UTF-8')))
QtGui.QApplication.clipboard().setMimeData(md)
else:
with open(fileName, 'wb') as fh:
fh.write(asUnicode(xml).encode('utf-8'))
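# Illustrative usage sketch (added, not part of the original module): how the
# exporter above is typically driven once a pyqtgraph item exists inside a
# running Qt application. The name `plot_item` is a placeholder.
def _example_export_usage(plot_item):
    exporter = SVGExporter(plot_item)
    svg_bytes = exporter.export(toBytes=True)   # reconstructed SVG document as bytes
    exporter.export(fileName='figure.svg')      # or write it straight to disk
    return svg_bytes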
xmlHeader = """\
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.2" baseProfile="tiny">
<title>pyqtgraph SVG export</title>
<desc>Generated with Qt and pyqtgraph</desc>
"""
def generateSvg(item, options={}):
global xmlHeader
try:
node, defs = _generateItemSvg(item, options=options)
finally:
## reset export mode for all items in the tree
if isinstance(item, QtGui.QGraphicsScene):
items = list(item.items())
else:
items = [item]
for i in items:
items.extend(i.childItems())
for i in items:
if hasattr(i, 'setExportMode'):
i.setExportMode(False)
cleanXml(node)
defsXml = "<defs>\n"
for d in defs:
defsXml += d.toprettyxml(indent=' ')
defsXml += "</defs>\n"
return xmlHeader + defsXml + node.toprettyxml(indent=' ') + "\n</svg>\n"
def _generateItemSvg(item, nodes=None, root=None, options={}):
## This function is intended to work around some issues with Qt's SVG generator
## and SVG in general.
## 1) Qt SVG does not implement clipping paths. This is absurd.
## The solution is to let Qt generate SVG for each item independently,
## then glue them together manually with clipping.
##
## The format Qt generates for all items looks like this:
##
## <g>
## <g transform="matrix(...)">
## one or more of: <path/> or <polyline/> or <text/>
## </g>
## <g transform="matrix(...)">
## one or more of: <path/> or <polyline/> or <text/>
## </g>
## . . .
## </g>
##
## 2) There seems to be wide disagreement over whether path strokes
## should be scaled anisotropically.
## see: http://web.mit.edu/jonas/www/anisotropy/
## Given that both inkscape and illustrator seem to prefer isotropic
## scaling, we will optimize for those cases.
##
## 3) Qt generates paths using non-scaling-stroke from SVG 1.2, but
## inkscape only supports 1.1.
##
## Both 2 and 3 can be addressed by drawing all items in world coordinates.
profiler = debug.Profiler()
if nodes is None: ## nodes maps all node IDs to their XML element.
## this allows us to ensure all elements receive unique names.
nodes = {}
if root is None:
root = item
## Skip hidden items
if hasattr(item, 'isVisible') and not item.isVisible():
return None
## If this item defines its own SVG generator, use that.
if hasattr(item, 'generateSvg'):
return item.generateSvg(nodes)
## Generate SVG text for just this item (exclude its children; we'll handle them later)
tr = QtGui.QTransform()
if isinstance(item, QtGui.QGraphicsScene):
xmlStr = "<g>\n</g>\n"
doc = xml.parseString(xmlStr)
childs = [i for i in item.items() if i.parentItem() is None]
elif item.__class__.paint == QtGui.QGraphicsItem.paint:
xmlStr = "<g>\n</g>\n"
doc = xml.parseString(xmlStr)
childs = item.childItems()
else:
childs = item.childItems()
tr = itemTransform(item, item.scene())
## offset to corner of root item
if isinstance(root, QtGui.QGraphicsScene):
rootPos = QtCore.QPoint(0,0)
else:
rootPos = root.scenePos()
tr2 = QtGui.QTransform()
tr2.translate(-rootPos.x(), -rootPos.y())
tr = tr * tr2
arr = QtCore.QByteArray()
buf = QtCore.QBuffer(arr)
svg = QtSvg.QSvgGenerator()
svg.setOutputDevice(buf)
dpi = QtGui.QDesktopWidget().logicalDpiX()
svg.setResolution(dpi)
p = QtGui.QPainter()
p.begin(svg)
if hasattr(item, 'setExportMode'):
item.setExportMode(True, {'painter': p})
try:
p.setTransform(tr)
item.paint(p, QtGui.QStyleOptionGraphicsItem(), None)
finally:
p.end()
## Can't do this here--we need to wait until all children have painted as well.
## this is taken care of in generateSvg instead.
#if hasattr(item, 'setExportMode'):
#item.setExportMode(False)
if QT_LIB in ['PySide', 'PySide2']:
xmlStr = str(arr)
else:
xmlStr = bytes(arr).decode('utf-8')
doc = xml.parseString(xmlStr.encode('utf-8'))
try:
## Get top-level group for this item
g1 = doc.getElementsByTagName('g')[0]
## get list of sub-groups
g2 = [n for n in g1.childNodes if isinstance(n, xml.Element) and n.tagName == 'g']
defs = doc.getElementsByTagName('defs')
if len(defs) > 0:
defs = [n for n in defs[0].childNodes if isinstance(n, xml.Element)]
except:
print(doc.toxml())
raise
profiler('render')
## Get rid of group transformation matrices by applying
## transformation to inner coordinates
correctCoordinates(g1, defs, item, options)
profiler('correct')
## decide on a name for this item
baseName = item.__class__.__name__
i = 1
while True:
|
n3wb13/OpenNfrGui-5.0-1
|
lib/python/Plugins/Extensions/MediaPortal/additions/porn/x2search4porn.py
|
Python
|
gpl-2.0
| 7,051
| 0.032203
|
# -*- coding: utf-8 -*-
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
from Plugins.Extensions.MediaPortal.resources.keyboardext import VirtualKeyBoardExt
CONFIG = "/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/additions/additions.xml"
class toSearchForPorn(MPScreen):
def __init__(self, session):
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultGenreScreenCover.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreenCover.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"cancel" : self.keyCancel,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft,
"red" : self.keyRed,
"green" : self.keyGreen,
"yellow" : self.keyYellow
}, -1)
self['title'] = Label("2Search4Porn")
self['name'] = Label("Your Search Requests")
self['ContentTitle'] = Label("Annoyed, typing in your search-words for each Porn-Site again and again?")
self['F1'] = Label(_("Delete"))
self['F2'] = Label(_("Add"))
self['F3'] = Label(_("Edit"))
self.keyLocked = True
self.suchString = ''
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.Searches)
def Searches(self):
self.genreliste = []
self['liste'] = self.ml
if not fileExists(config.mediaportal.watchlistpath.value+"mp_2s4p"):
open(config.mediaportal.watchlistpath.value+"mp_2s4p","w").close()
if fileExists(config.mediaportal.watchlistpath.value+"mp_2s4p"):
fobj = open(config.mediaportal.watchlistpath.value+"mp_2s4p","r")
for line in fobj:
self.genreliste.append((line, None))
fobj.close()
self.ml.setList(map(self._defaultlistcenter, self.genreliste))
self.keyLocked = False
def SearchAdd(self):
suchString = ""
self.session.openWithCallback(self.SearchAdd1, VirtualKeyBoardExt, title = (_("Enter Search")), text = suchString, is_dialog=True)
def SearchAdd1(self, suchString):
if suchString is not None and suchString != "":
self.genreliste.append((suchString,None))
self.ml.setList(map(self._defaultlistcenter, self.genreliste))
def SearchEdit(self):
if len(self.genreliste) > 0:
suchString = self['liste'].getCurrent()[0][0].rstrip()
self.session.openWithCallback(self.SearchEdit1, VirtualKeyBoardExt, title = (_("Enter Search")), text = suchString, is_dialog=True)
def SearchEdit1(self, suchString):
if suchString is not None and suchString != "":
pos = self['liste'].getSelectedIndex()
self.genreliste.pop(pos)
self.genreliste.insert(pos,(suchString,None))
self.ml.setList(map(self._defaultlistcenter, self.genreliste))
def SearchCallback(self, suchString):
if suchString is not None and suchString != "":
self.session.open(toSearchForPornBrowse,suchString)
def keyOK(self):
if self.keyLocked:
return
if len(self.genreliste) > 0:
self.SearchCallback(self['liste'].getCurrent()[0][0].rstrip())
def keyRed(self):
if self.keyLocked:
return
if len(self.genreliste) > 0:
self.genreliste.pop(self['liste'].getSelectedIndex())
self.ml.setList(map(self._defaultlistcenter, self.genreliste))
def keyGreen(self):
if self.keyLocked:
return
self.SearchAdd()
def keyYellow(self):
if self.keyLocked:
return
self.SearchEdit()
def keyCancel(self):
if self.keyLocked:
return
self.genreliste.sort(key=lambda t : t[0].lower())
fobj_out = open(config.mediaportal.watchlistpath.value+"mp_2s4p","w")
x = len(self.genreliste)
if x > 0:
for c in range(x):
writeback = self.genreliste[c][0].rstrip()+"\n"
fobj_out.write(writeback)
fobj_out.close()
else:
os.remove(config.mediaportal.watchlistpath.value+"mp_2s4p")
self.close()
class toSearchForPornBrowse(MPScreen):
def __init__(self, session, suchString):
self.suchString = suchString
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultGenreScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"cancel" : self.keyCancel,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft
}, -1)
self['title'] = Label("2Search4Porn")
self['ContentTitle'] = Label("Select Site")
self['name'] = Label(_("Selection:"))
self.keyLocked = True
self.pornscreen = None
self.genreliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.loadsites)
def loadsites(self):
conf = xml.etree.cElementTree.parse(CONFIG)
for x in conf.getroot():
if x.tag == "set" and x.get("name") == 'additions':
root = x
for x in root:
if x.tag == "plugin":
if x.get("type") == "mod":
if x.get("confcat") == "porn" and x.get("search") == "1":
gz = x.get("gz")
|
if not config.mediaportal.showgrauzone.value and gz == "1":
pass
else:
mod = eval("config.mediaportal." + x.get("confopt") + ".value")
if mod:
exec("self.genreliste.append((\""+x.get("name").replace("&amp;","&")+"\", None))")
self.genreliste.sort(key=lambda t : t[0].lower())
self.keyLocked = False
self.ml.setList(map(self._defaultlistcenter, self.genreliste))
def keyOK(self):
if self.keyLocked:
return
auswahl = self['liste'].getCurrent()[0][0]
|
self.suchString = self.suchString.rstrip()
conf = xml.etree.cElementTree.parse("/usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/additions/additions.xml")
for x in conf.getroot():
if x.tag == "set" and x.get("name") == 'additions':
root = x
for x in root:
if x.tag == "plugin":
if x.get("type") == "mod":
if x.get("confcat") == "porn" and x.get("search") == "1":
if auswahl == x.get("name").replace("&amp;","&"):
modfile = x.get("modfile")
modfile = "Plugins.Extensions.MediaPortal.additions.%s.%s" % (modfile.split(".")[0], modfile.split(".")[1])
exec("from "+modfile+" import *")
exec("self.suchString = self.suchString.replace(\" \",\""+x.get("delim")+"\")")
exec("Name = \"2Search4Porn - %s\" % (self.suchString)")
exec("Link = \""+x.get("searchurl").replace("&amp;","&")+"\" % (self.suchString)")
print "Name: "+ Name
print "Link: "+ Link
exec("self.session.open("+x.get("searchscreen")+", Link, Name"+x.get("searchparam").replace("&quot;","\"")+")")
|
PlotWatt/webipy
|
examples/ex1.py
|
Python
|
bsd-3-clause
| 1,406
| 0
|
import webipy
import numpy as np
import matplotlib.pyplot as plt
import pylab
import pandas as pd
pylab.rcParams['figure.figsize'] = (15, 11)
@webipy.exports
def plot(x, n=4):
"""
Demo of scatter plot on a polar axis.
Size increases radially in this example and color increases with angle
"""
N = int(x)
r = 2 * np.random.rand(N)
theta = 2 * np.pi * np.random.rand(N)
area = 200 * r**2 * np.random.rand(N)
colors = theta
ax = plt.subplot(111, polar=True)
ax.scatter(theta, r, c=colors, s=area, cmap=plt.cm.hsv)
@webipy.exports
def sine(x):
"""
simple sine wave with x points
uses mpld3
"""
import mpld3
mpld3.enable_notebook()
X = np.linspace(-np.pi, np.pi, int(x), endpoint=True)
C, S = np.cos(X), np.sin(X)
|
ax = plt.subplot(111)
ax.plot(X, C)
ax.plot(X, S)
return pd.DataFrame({'X': X, 'sine': S, 'cos': C})
@webipy.exports
def sine1(x):
"""
simple sine wave with x points
uses bokeh
"""
from bokeh import mpl
X = np.linspace(-np.pi, np.pi, int(x), endpoint=True)
C, S = np.cos(X), np.sin(X)
ax = plt.subplot(111)
ax.plot(X, C)
ax.plot(X, S)
# mpl.to_bokeh()
@webipy.exports
def bool_params(non_bool1, non_bool2=3, x=True, y=False):
print "non_bools"
|
print "non_bool1:", non_bool1, "non_bool2", non_bool2
print "bools"
print "x:", x, "y:", y
|
rokuz/omim
|
tools/python/generate_local_ads_symbols.py
|
Python
|
apache-2.0
| 1,823
| 0.002194
|
#!/usr/bin/env python
import os
import sys
PREFIX_DELIMITER = '_'
def enumerate_symbols(symbols_folder_path):
symbols = []
for filename in os.listdir(symbols_folder_path):
parts = os.path.splitext(filename)
if parts[1] == ".svg":
symbols.append(parts[0])
return symbols
def check_symbols(symbols):
numbers = set()
for s in symbols:
pos = s.find(PREFIX_DELIMITER)
n = s[:pos]
if pos < 0 or not n.isdigit():
raise ValueError('Symbol ' + s + ' must have a numeric prefix')
elif int(n) in numbers:
raise ValueError('Symbol ' + s + ' has duplicated numeric prefix')
else:
numbers.add(int(n))
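# Illustrative sketch (added, not in the original script): check_symbols()
# accepts names with unique numeric prefixes and raises on duplicates or on a
# missing prefix. The symbol names below are made-up examples.
def _example_check_symbols():
    check_symbols(['1_cafe', '2_bank'])        # passes silently
    try:
        check_symbols(['1_cafe', '1_bank'])    # duplicated numeric prefix
    except ValueError as error:
        print(error)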
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Usage: {0} <path_to_omim/data/styles> [<target_path>]'.format(sys.argv[0]))
sys.exit(-1)
path_to_styles = os.path.join(sys.argv[1], 'clear')
if not os.path.isdir(path_to_styles):
print('Invalid path to styles folder')
|
sys.exit(-1)
target_path = ''
if len(sys.argv) >= 3:
target_path = sys.argv[2]
output_name = os.path.join(target_path, 'local_ads_symbols.txt')
if os.path.exists(output_name):
os.remove(output_name)
paths = ['style-clear', 'style-night']
symbols = []
for folder_path in paths:
s = enumerate_symbols(os.path.join(path_to_styles, folder_path, 'symbols-ad'))
if len(symbols) != 0:
symbols.sort()
s.sort()
|
if symbols != s:
raise ValueError('Different symbols set in folders' + str(paths))
else:
symbols = s
check_symbols(symbols)
with open(output_name, "w") as text_file:
for symbol in symbols:
text_file.write(symbol + '\n')
|
palindromed/data-structures
|
src/test_graph.py
|
Python
|
mit
| 10,017
| 0.000399
|
import pytest
# TODO: use same globals for reverse operations such as add, remove
GRAPHS = [
({},
[],
[]),
({'nodeA': {}},
['nodeA'],
[]),
({'nodeA': {'nodeB': 'weight'},
'nodeB': {}},
['nodeA', 'nodeB'],
[('nodeA', 'nodeB')]),
({'nodeA': {'nodeB': 'weight'},
'nodeB': {'nodeA': 'weight'}},
['nodeA', 'nodeB'],
[('nodeA', 'nodeB'), ('nodeB', 'nodeA')]),
({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
'nodeB': {'nodeA': 'weight'},
'nodeC': {'nodeA': 'weight', 'nodeC': 'weight'}},
['nodeA', 'nodeB', 'nodeC'],
[('nodeA', 'nodeB'),
('nodeA', 'nodeC'),
('nodeB', 'nodeA'),
('nodeC', 'nodeA'),
('nodeC', 'nodeC')]),
]
GRAPHS_FOR_NODE_INSERT = [
({},
'nodeN',
{'nodeN': {}}),
({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'}},
'nodeN',
{'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
'nodeN': {}}),
({'nodeA': {'nodeA': 'weight', 'nodeB': 'weight'},
'nodeB': {'nodeC': 'weight', 'nodeA': 'weight'}},
'nodeN',
{'nodeA': {'nodeA': 'weight', 'nodeB': 'weight'},
'nodeB': {'nodeC': 'weight', 'nodeA': 'weight'},
'nodeN': {}}),
]
GRAPHS_ADD_EDGE = [
({'nodeA': {'nodeB': 'weight'},
'nodeB': {'nodeA': 'weight'}},
"nodeX",
"nodeY",
{'nodeA': {'nodeB': 'weight'},
'nodeB': {'nodeA': 'weight'},
'nodeX': {'nodeY': 'weight'},
|
'nodeY': {}}),
({'nodeA': {'nodeB': 'weight'},
'nodeB': {'nodeA': 'weight'}},
'nodeA',
|
'nodeB',
{'nodeA': {'nodeB': 'weight'},
'nodeB': {'nodeA': 'weight'}}),
({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
'nodeB': {'nodeA': 'weight'},
'nodeC': {'nodeA': 'weight', 'nodeC': 'weight'}},
'nodeB',
'nodeC',
{'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
'nodeB': {'nodeA': 'weight', 'nodeC': 'weight'},
'nodeC': {'nodeA': 'weight', 'nodeC': 'weight'}}),
]
GRAPHS_DEL_NODE = [
({'nodeA': {'nodeB': 'weight'},
'nodeB': {'nodeA': 'weight'},
'nodeX': {'nodeY': 'weight'},
'nodeY': {}},
'nodeA',
{'nodeB': {},
'nodeX': {'nodeY': 'weight'},
'nodeY': {}}),
({'nodeA': {'nodeB': 'weight'},
'nodeB': {'nodeA': 'weight'}},
'nodeB',
{'nodeA': {}}),
]
GRAPHS_DEL_EDGE = [
({'nodeA': {'nodeB': 'weight'},
'nodeB': {}},
'nodeA',
'nodeB',
{'nodeA': {},
'nodeB': {}}),
({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
'nodeB': {},
'nodeC': {}},
'nodeA',
'nodeB',
{'nodeA': {'nodeC': 'weight'},
'nodeB': {},
'nodeC': {}})
]
NEIGHBORS = [
({'nodeA': {},
'nodeB': {'nodeA': 'weight'}},
'nodeB',
['nodeA']),
({'nodeA': {},
'nodeB': {'nodeA': 'weight'}},
'nodeA',
[]),
({'nodeA': {'nodeB': 'weight', 'nodeC': 'weight'},
'nodeB': {'nodeA': 'weight'},
'nodeC': {'nodeA': 'weight'}},
'nodeA',
['nodeB', 'nodeC']),
]
ADJACENT = [
({'nodeA': {'nodeB': 'weight'},
'nodeB': {}},
'nodeA',
'nodeB',
True),
({'nodeA': {'nodeB': 'weight'},
'nodeB': {}},
'nodeB',
'nodeA',
False),
]
ADJACENT_NODES_GONE = [
({'nodeA': {'nodeB': 'weight'},
'nodeB': {}},
'nodeX', 'nodeB'),
({'nodeA': {'nodeB': 'weight'},
'nodeB': {}},
'nodeX', 'nodeY'),
({'nodeA': {'nodeB': 'weight'},
'nodeB': {}},
'nodeA', 'nodeY'),
]
NODE_TRAVERSAL_BREADTH = [
({'A': {'B': 'weight', 'C': 'weight'},
'B': {'A': 'weight', 'D': 'weight', 'E': 'weight'},
'C': {'A': 'weight', 'F': 'weight', 'G': 'weight'},
'D': {'B': 'weight', 'H': 'weight'},
'E': {'B': 'weight'},
'F': {'C': 'weight'},
'G': {'C': 'weight'},
'H': {'D': 'weight'}},
'A',
['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']),
({'A': {'B': 'weight', 'C': 'weight'},
'B': {'C': 'weight', 'D': 'weight'},
'C': {},
'D': {}},
'A',
['A', 'B', 'C', 'D']),
({'a': {}}, 'a', ['a']),
]
NODE_TRAVERSAL_DEPTH = [
({'A': {'B': 'weight', 'E': 'weight'},
"B": {'C': 'weight', 'D': 'weight'},
'E': {},
'C': {},
'D': {}},
'A',
['A', 'E', 'B', 'D', 'C']),
({'A': {'B': 'weight', 'E': 'weight'},
"B": {'C': 'weight', 'D': 'weight'},
'E': {},
'C': {'A': 'weight', 'E': 'weight'},
'D': {}},
'A',
['A', 'E', 'B', 'D', 'C']),
({'a': {'b': 'weight', 'g': 'weight'},
'b': {'c': 'weight'},
'g': {'h': 'weight', 'j': 'weight'},
'c': {'d': 'weight'},
'h': {'i': 'weight'},
'j': {'k': 'weight'},
'd': {'e': 'weight', 'f': 'weight'},
'i': {},
'k': {},
'e': {},
'f': {}},
'a',
['a', 'g', 'j', 'k', 'h', 'i', 'b', 'c', 'd', 'f', 'e']),
({'a': {}}, 'a', ['a']),
]
GET_WEIGHT = [
({'A': {'B': 'weight1', 'E': 'weight2'},
"B": {'C': 'weight3', 'D': 'weight4'},
'E': {},
'C': {},
'D': {}},
'A',
'B',
'weight1',),
({'A': {'B': 'weight1', 'E': 'weight2'},
"B": {'C': 'weight3', 'D': 'weight4'},
'E': {},
'C': {},
'D': {}},
'B',
'C',
'weight3',),
({'A': {'B': 'weight1', 'E': 'weight2'},
"B": {'C': 'weight3', 'D': 'weight4'},
'E': {},
'C': {},
'D': {}},
'B',
'D',
'weight4',),
]
@pytest.fixture
def graph_fixture(scope='function'):
from graph import Graph
return Graph()
@pytest.mark.parametrize(("built_graph", "node", "expected"), GRAPHS_DEL_NODE)
def test_del_node_exists(graph_fixture, built_graph, node, expected):
graph_fixture._container = built_graph
graph_fixture.del_node(node)
assert graph_fixture._container == expected
@pytest.mark.parametrize(("built_graph", "node_list", "edge_list"), GRAPHS)
def test_nodes(graph_fixture, built_graph, node_list, edge_list):
graph_fixture._container = built_graph
result = graph_fixture.nodes()
assert set(result) == set(node_list)
@pytest.mark.parametrize(("built_graph", "node_list", "edge_list"), GRAPHS)
def test_edges(graph_fixture, built_graph, node_list, edge_list):
graph_fixture._container = built_graph
result = graph_fixture.edges()
assert set(edge_list) == set(result)
@pytest.mark.parametrize(("built_graph", "new_node", "expected"),
GRAPHS_FOR_NODE_INSERT)
def test_add_node(graph_fixture, built_graph, new_node, expected):
graph_fixture._container = built_graph
graph_fixture.add_node(new_node)
assert graph_fixture._container == expected
@pytest.mark.parametrize(("built_graph", "n1", "n2", "expected"),
GRAPHS_ADD_EDGE)
def test_add_edge(graph_fixture, built_graph, n1, n2, expected):
graph_fixture._container = built_graph
graph_fixture.add_edge(n1, n2)
assert graph_fixture._container == expected
def test_del_node_not_exists(graph_fixture):
graph_fixture._container = {'nodeA': {'nodeA': 'weight'}, 'nodeB': {}}
with pytest.raises(KeyError):
graph_fixture.del_node('nodeX')
@pytest.mark.parametrize(("built_graph", "node1", "node2", "expected"),
GRAPHS_DEL_EDGE)
def test_del_edge(graph_fixture, built_graph, node1, node2, expected):
graph_fixture._container = built_graph
graph_fixture.del_edge(node1, node2)
assert graph_fixture._container == expected
def test_del_edge_not_exists(graph_fixture):
graph_fixture._container = {'nodeA': {}}
with pytest.raises(ValueError):
graph_fixture.del_edge('nodeA', 'nodeB')
def test_has_node_true(graph_fixture):
graph_fixture._container = {'nodeA': {}}
assert graph_fixture.has_node('nodeA')
def test_has_node_false(graph_fixture):
graph_fixture._container = {'nodeA': {}}
assert not graph_fixture.has_node('nodeB')
@pytest.mark.parametrize(("built_graph", 'node', 'expected'), NEIGHBORS)
def test_neighbors(graph_fixture, built_graph, node, expected):
graph_fixture._container = built_graph
assert set(graph_fixture.neighbors(node)) == set(expected)
def test_neighbors
|
momingsong/ns-3
|
bash-py-gp/baseline_picdata.py
|
Python
|
gpl-2.0
| 12,175
| 0.012238
|
import sys
import os
#For baseline and redundancy-detection, to prepare the message size picture data
def MessageSize(typePrefix, directory):
wf = open("%(typePrefix)s-msgsize.data"%vars(), "w")
wf.write("#Suggest Filename: %(typePrefix)s-message.data\n#Data for drawing message overall size in different Amount/Redundancy\n"%vars())
wf.write("#row: amount(10000, 20000, 30000, 40000, 50000)\n#col: redundancy(1, 2, 3, 4, 5)\n")
wf.write('#amount\tRedundancy 1\tRedundancy 2\tRedundancy 3\tRedundancy 4\tRedundancy 5\n')
num = 100 # may be subject to change based on the simulation node number
for amount in [10000,20000,30000,40000,50000]:
wf.write(str(amount) + " ")
for redundancy in [1,2,3,4,5]:
file = open("%(directory)s%(typePrefix)s_da%(amount)s_r%(redundancy)s_overhead.data"%vars())
for line in file:
if line[0] != "#":
numbers = line.split(' ')
wf.write(numbers[7]+" ")
wf.write("\n")
file.close()
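# Illustrative usage note (added, not in the original script): the functions in
# this module are driven with a file-name prefix and the directory holding the
# generated *.data files; both values below are made-up examples.
#   MessageSize("baseline", "./results/")
#   RecallAndLatency("baseline", "./results/")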
def RecvToSendRatio(typePrefix, directory):
writefile = open("%(typePrefix)s-rsratio-nonce.data"%vars(), "w")
writefile.write("#Suggest Filename: %(typePrefix)s-rsratio.data\n#Data for drawing each package in different Amount/Redundancy\n"%vars())
writefile.write("#MPM100 ratio MPM200 ratio MPM300 ratio MPM400 ratio MPM500 ratio MPM600 ratio NoLimit ratio\n")
writefile.write("0 0 0 0 0 0 0 0 0 0 \n")
backofftime = 2.5 # may be subject to change based on the data amount to observe
da=50000
msgcount = {}
ratiotemp = {}
ratio = {}
for redundancy in [1,2,3,4,5]:
msgcount[redundancy] = {}
for logcontent in ['SI','SD']:
file = open("%(directory)s%(typePrefix)s_da%(da)s_r%(redundancy)s_%(logcontent)s.data"%vars())
for line in file:
if line[0:2] == logcontent:
info = line.split(' ')
for x in info:
if x[0:2] == "Ho":
nonce = x.split(':')[1]
if msgcount[redundancy].has_key(nonce):
msgcount[redundancy][nonce]["s"] += 1
else:
msgcount[redundancy][nonce] = {}
msgcount[redundancy][nonce]["s"] = 1
msgcount[redundancy][nonce]["r"] = 0
msgcount[redundancy][nonce]["rs"] = 0
for logcontent in ['RI','DRD']:
file = open("%(directory)s%(typePrefix)s_da%(da)s_r%(redundancy)s_%(logcontent)s.data"%vars())
for line in file:
if line[0:2] == logcontent or line[0:3] == logcontent:
info = line.split(' ')
for x in info:
if x[0:2] == "Ho":
nonce = x.split(':')[1]
if(msgcount[redundancy].has_key(nonce)):
msgcount[redundancy][nonce]["r"] += 1
else:
print logcontent, redundancy, nonce
for nonce in msgcount[redundancy]:
msgcount[redundancy][nonce]['rs'] = float(msgcount[redundancy][nonce]['r']) / float(msgcount[redundancy][nonce]['s'])
msg = sorted(msgcount[redundancy].iteritems(), key=lambda s: s[1]['rs'])
for x in range(len(msg)):
ratiotemp[msg[x][1]["rs"]] = float(x+1) / len(msg);
ratio[redundancy] = sorted(ratiotemp.iteritems())
ratiotemp.clear()
length = max(len(ratio[1]),len(ratio[2]),len(ratio[3]),len(ratio[4]),len(ratio[5]))
for j in range(length):
for i in [1,2,3,4,5]:
if(len(ratio[i])<=j):
writefile.write("null null")
else:
writefile.write(str(ratio[i][j][0])+" "+str(ratio[i][j][1])+ " ")
writefile.write("\n")
def RecvToSendRatioHopnonce(typePrefix, directory):
writefile = open("%(typePrefix)s-rsratio-hopnonce.data"%vars(), "w")
writefile.write("#Suggest Filename: %(typePrefix)s-rsratio.data\n#Data for drawing each package in different Amount/Redundancy\n"%vars())
writefile.write("#MPM100 ratio MPM200 ratio MPM300 ratio MPM400 ratio MPM500 ratio MPM600 ratio NoLimit ratio\n")
writefile.write("0 0 0 0 0 0 0 0 0 0 0 0 0 0\n")
backofftime = 2.5 # may be subject to change based on the data amount to observe
msgcount = {}
ratiotemp = {}
ratio = {}
for mms in [100,200,300,400,500,600,-1]:
msgcount[mms] = {}
for logcontent in ['SI','SD']:
file = open("%(directory)s%(typePrefix)s_mb%(backofftime)s_mms%(mms)s_%(logcontent)s.data"%vars())
for line in file:
if line[0:2] == logcontent:
info = line.split(' ')
for x in info:
if x[0:2] == "Ho":
nonce = x.split(':')[1]
if msgcount[mms].has_key(nonce):
msgcount[mms][nonce]["s"] += 1
else:
msgcount[mms][nonce] = {}
msgcount[mms][nonce]["s"] = 1
msgcount[mms][nonce]["r"] = 0
msgcount[mms][nonce]["rs"] = 0
for logcontent in ['RI','DRD']:
file = open("%(directory)s%(typePrefix)s_mb%(backofftime)s_mms%(mms)s_%(logcontent)s.data"%vars())
for line in file:
|
if line[0:2] == logcontent or line[0:3] == logcontent:
|
info = line.split(' ')
for x in info:
if x[0:2] == "Ho":
nonce = x.split(':')[1]
if(msgcount[mms].has_key(nonce)):
msgcount[mms][nonce]["r"] += 1
else:
print logcontent, mms, nonce
for nonce in msgcount[mms]:
msgcount[mms][nonce]['rs'] = float(msgcount[mms][nonce]['r']) / float(msgcount[mms][nonce]['s'])
msg = sorted(msgcount[mms].iteritems(), key=lambda s: s[1]['rs'])
for x in range(len(msg)):
ratiotemp[msg[x][1]["rs"]] = float(x+1) / len(msg);
ratio[mms] = sorted(ratiotemp.iteritems())
ratiotemp.clear()
length = max(len(ratio[100]),len(ratio[200]),len(ratio[300]),len(ratio[400]),len(ratio[500]),len(ratio[-1]))
for j in range(length):
for i in [100,200,300,400,500,600,-1]:
if(len(ratio[i])<=j):
writefile.write("null null")
else:
writefile.write(str(ratio[i][j][0])+" "+str(ratio[i][j][1]))
writefile.write("\n")
#Get recall and latency
def RecallAndLatency(typePrefix, directory):
recallf = open("./%(typePrefix)s-recall.data"%vars(), "w")
latencyf = open("./%(typePrefix)s-latency.data"%vars(), "w")
recallf.write("#Data for recall of the %(typePrefix)s\n"%vars())
latencyf.write("#Data for latency of the %(typePrefix)s\n"%vars())
recallf.write("# row: max_backoff(0 0.5 1 1.5 2 2.5 3)\n")
recallf.write("# col: max_message_size(-1, 200, 400, 600, 800, 1000)\n")
recallf.write("#MaxBackoff No Limits 100 200 300 400 500\n")
latencyf.write("# row: max_backoff(0 0.5 1 1.5 2 2.5 3)\n")
latencyf.write("# col: max_message_size(-1, 200, 400, 600, 800, 1000)\n")
latencyf.write("#MaxBackoff No Limits 100 200 300 400 500\n")
for amount in [10000,20000,30000,40000,50000]:
recallf.write(str(amount)+" ")
latencyf.write(str(amount)+" ")
for redundancy in [1,2,3,4,5]:
file = open("%(directory)s%(typePrefix)s_da%(amount)s_r%(redundancy)s_0.data"%vars())
line = file.readlines()[-1].split()
recallf.write(str(float(line[1])/amount)+" ")
latencyf.write(line[0]+" ")
fil
|
rodrigofaccioli/2pg_cartesian
|
scripts/analysis/compute_rmsd_pdb_files.py
|
Python
|
apache-2.0
| 1,725
| 0.029565
|
"""
Routines to compute RMSD of all PROT_IND_ files
|
These routines were developed by:
Rodrigo Antonio Faccioli - rodrigo.faccioli@usp.br / rodrigo.faccioli@gmail.com
Leandro Oliveira Bortot - leandro.bortot@usp.br / leandro.obt@gmail.com
"""
import os
import sys
from collections import OrderedDict
native = "1VII.pdb"
path_gromacs ="/home/faccioli/Programs/gmx-4.6.5/no_mpi/bin/"
|
main_command = "echo C-alpha C-alpha | @PATH_GROMACS@./g_rms -f @PROT@ -s @NATIVE@ -o temporary_rmsd.xvg 2>/dev/null"
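# Illustrative sketch (added, not in the original script): how the command
# template above is filled in for a single structure file; the file name used
# here is a made-up example.
def _example_command():
    cmd = main_command.replace("@PATH_GROMACS@", path_gromacs)
    cmd = cmd.replace("@PROT@", "PROT_IND_0001.pdb").replace("@NATIVE@", native)
    return cmd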
""" This function obtains all pdb files
in mypath
"""
def get_PROT_IND_files_pdb(mypath):
only_pdb_file = []
for root, dirs, files in os.walk(mypath):
for file in files:
#if file.endswith(".pdb"):
if file.find("PROT_IND_") >=0:
f_path = os.path.join(root,file)
only_pdb_file.append(f_path)
return only_pdb_file
def main():
pdb_path = sys.argv[1]
dict_rmsd = {}
all_pdbs = get_PROT_IND_files_pdb(pdb_path)
for pdb in all_pdbs:
aux_command = main_command.replace("@PATH_GROMACS@", path_gromacs).replace("@PROT@",pdb).replace("@NATIVE@", native)
os.system(aux_command)
temp_rmsd = open("temporary_rmsd.xvg", "r")
for line in temp_rmsd.readlines():
if line.find("@") < 0 and line.find("#") <0:
rmsd_value = float(str(line).split()[1])
only_pdb_file_name = os.path.basename(pdb)
dict_rmsd[only_pdb_file_name] = rmsd_value
temp_rmsd.close()
os.remove("temporary_rmsd.xvg")
#Saving dictionary
rmsd_final = open("all_rmsd.txt", "w")
d_sorted_by_value = OrderedDict(sorted(dict_rmsd.items(), key=lambda x: x[1]))
for key, value in d_sorted_by_value.items():
rmsd_final.write(str(key) +"\t" + str(value) + "\n")
rmsd_final.close()
main()
|
RRCKI/pilot
|
castorSvcClassSiteMover.py
|
Python
|
apache-2.0
| 16,969
| 0.005187
|
import os
import commands
import re
import SiteMover
from futil import *
from PilotErrors import PilotErrors
from pUtil import tolog, readpar, verifySetupCommand
from time import time
from FileStateClient import updateFileState
from timed_command import timed_command
class castorSvcClassSiteMover(SiteMover.SiteMover):
"""
SiteMover for CASTOR, which finds the correct service class from which to stage in
files via rfcp.
"""
copyCommand = "rfcpsvcclass"
checksum_command = "adler32"
has_mkdir = True
has_df = False
has_getsize = True
has_md5sum = False
has_chmod = True
timeout = 5*3600
def __init__(self, setup_path='', *args, **kwrds):
self._setup = setup_path
def get_timeout(self):
return self.timeout
def _check_space(self, ub):
"""CASTOR specific space verification.
There is no simple way at the moment to verify CASTOR space availability - check info system instead"""
return 999999
def addMD5sum(self, lfn, md5sum):
""" add md5sum to lfn """
if os.environ.has_key('LD_LIBRARY_PATH'):
tolog("LD_LIBRARY_PATH prior to lfc import: %s" % os.environ['LD_LIBRARY_PATH'])
else:
tolog("!!WARNING!!2999!! LD_LIBRARY_PATH not set prior to lfc import")
import lfc
os.environ['LFC_HOST'] = readpar('lfchost')
stat = lfc.lfc_filestatg()
exitcode = lfc.lfc_statg(lfn, "", stat)
if exitcode != 0:
# print "error:",buffer
err_num = lfc.cvar.serrno
tolog("!!WARNING!!2999!! lfc.lfc_statg: %d %s" % (err_num, lfn))
return exitcode
exitcode = lfc.lfc_setfsizeg(stat.guid, stat.filesize, 'MD', md5sum)
if exitcode != 0:
# print "error:",buffer
err_num = lfc.cvar.serrno
tolog("[Non-fatal] ERROR: lfc.lfc_setfsizeg: %d %s %s" % (err_num, lfn, md5sum))
return exitcode
tolog("Successfully set md5sum for %s" % (lfn))
return exitcode
def get_data(self, gpfn, lfn, path, fsize=0, fchecksum=0, guid=0, **pdict):
""" The local file is assumed to have a relative path that is the same as the relative path in the 'gpfn'
loc_... are the variables used to access the file in the locally exported file system
TODO: document GPFN format (SURL from catalog srm://host/path)
TODO: document better constraint
"""
error = PilotErrors()
pilotErrorDiag = ""
# Get input parameters from pdict
useCT = pdict.get('usect', True)
jobId = pdict.get('jobId', '')
workDir = pdict.get('workDir', '')
prodDBlockToken = pdict.get('access', '')
# get the DQ2 tracing report
report = self.getStubTracingReport(pdict['report'], 'castorSVC', lfn, guid)
# get a proper envsetup
envsetup = self.getEnvsetup(get=True)
# Hard code the configuration dictionary for now, but eventually this should be
# set dynamically.
#
# There are the following configuration sections:
# setup - base environment variables to be set
# svcClassMap - dictionary of string matches vs. service class names
# svcClassList - list of all service classes in case the svcClassMap matching fails
# svcClassDefault - the service class to set if the file appears to be staged nowhere
#
# Information from RAL:
# [root@srm0661 ~]# listStorageArea -v atlas
# <Space Token> <Description> <service class> <type> <status>
# 4948ef55-0000-1000-b7dd-9b38bdd87201 "ATLASGROUP" "atlasStripDeg" "DURABLE" "ALLOCATED"
# 4948ef38-0000-1000-8606-973e4e998e02 "ATLASMCDISK" "atlasSimStrip" "DURABLE" "ALLOCATED"
# 4948eec6-0000-1000-8ca2-aba0529b4806 "ATLASDATADISK" "atlasStripInput" "DURABLE" "ALLOCATED"
# 4948ee8e-0000-1000-9ac5-81bb9b34ba7b "ATLASMCTAPE" "atlasSimRaw" "PERMANENT" "ALLOCATED"
# 4948ee71-0000-1000-b611-a0afad31f6c8 "ATLASDATATAPE" "atlasT0Raw" "PERMANENT" "ALLOCATED"
# "ATLASHOTDISK" "atlasHotDisk"
# In addition there is the "atlasFarm" class, which is used when data is staged back from tape
castorConfig = {
'setup' : {
'STAGE_HOST' : 'catlasstager.ads.rl.ac.uk',
'STAGER_HOST' : 'catlasstager.ads.rl.ac.uk',
'RFIO_USE_CASTOR_V2' : 'YES',
},
'svcClassList' : ('atlasHotDisk', 'atlasSimStrip', 'atlasStripInput', 'atlasFarm', 'atlasStripDeg', 'atlasT0Raw', 'atlasSimRaw', 'atlasScratchDisk', ),
'svcClassMap' : {
'/atlashotdisk/' : 'atlasHotDisk',
'/atlasmcdisk/' : 'atlasStripInput',
'/atlasdatadisk/' : 'atlasStripInput',
'/atlasgroupdisk/' : 'atlasStripDeg',
'/atlasdatatape/' : 'atlasFarm',
'/atlasmctape/' : 'atlasFarm',
'/atlasscratchdisk/' : 'atlasScratchDisk',
'/atlasProdDisk/' : 'atlasScratchDisk',
},
'svcClassDefault' : 'atlasFarm',
}
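# Illustrative note (added): with the map above, a castor path containing
# '/atlasdatadisk/' would select the 'atlasStripInput' service class, while a
# path that matches none of the keys falls back to svcClassDefault ('atlasFarm').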
# Set all environment variables for castor setup
for envVar, value in castorConfig['setup'].iteritems():
os.environ[envVar] = value
# Strip the gpfn (SURL) back to its bare castor component
tolog("gpfn is %s" % gpfn)
if self._setup:
_setup_str = "source %s; " % self._setup
else:
_setup_str = envsetup
ec, pilotErrorDiag = verifySetupCommand(error, _setup_str)
if ec != 0:
self.__sendReport('RFCP_FAIL', report)
return ec, pilotErrorDiag
loc_pfn = ''
if( gpfn.find('SFN') != -1 ):
s = gpfn.split('SFN=')
loc_pfn = s[1]
tolog("Found SFN string. Local file name %s" % loc_pfn)
else:
_tmp = gpfn.split('/', 3)
loc_pfn = '/'+_tmp[3]
tolog("Splitting SURL on slashes. Got local file name %s" % loc_pfn)
if not loc_pfn.startswith('/castor/'):
tolog("WARNING: Problem with local filename: Does not start with '/castor/'.")
# should the root file be copied or read directly by athena?
directIn, useFileStager = self.getTransferModes()
if directIn:
if useCT:
directIn = False
tolog("Direct access mode is switched off (file will be transferred with the copy tool)")
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="copy_to_scratch", type="input")
else:
# determine if the file is a root file according to its name
rootFile = self.isRootFileName(lfn)
if prodDBlockToken == 'local' or not rootFile:
directIn = False
tolog("Direct access mode has been switched off for this file (will be transferred with the copy tool)")
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="copy_to_scratch", type="input")
elif rootFile:
tolog("Found root file according to file name: %s (will not be transferred in direct reading mode)" % (lfn))
report['relativeStart'] = None
report['transferStart'] = None
self.__sendReport('FOUND_ROOT', report)
if useFileStager:
|
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="file_stager", type="input")
else:
updateFileState(lfn, workDir, jobId, mode="transfer_mode", state="remote_io", type="input")
return error.ERR_DIRECTIOFILE, pilotErrorDiag
else:
|
tolog("Normal file transfer")
# Now need to find the service class associated with the file.
# If we
|
fatihzkaratana/intranet
|
backend/intranet/tests/api.py
|
Python
|
apache-2.0
| 7,125
| 0.012351
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
import json
from intranet.models import User, Project, Part, STATE_CREATED
class Test(TestCase):
@classmethod
def setUpClass(self):
self.c = Client()
User.objects.all().delete()
#Create users
user = User(
username = 'user1',
first_name = 'first',
last_name = 'last',
email = 'user@test.es'
)
user.set_password('dummy')
user.save()
self.user = user
#LOGIN
#response = self.c.post(reverse('auth-login'), {'username': self.user.username, 'password':'dummy'})
#self.assertEqual(response.status_code,200)
#json_response = json.loads(response.content)
#self.assertEqual(json_response['valid'], True)
#self.token_auth = json_response['token_auth']
self.project = Project(
name = 'project 1',
description = 'description project 1',
)
self.project.save()
self.part = Part(
month = 06,
year = 2011,
employee = self.user,
state = 1,
)
self.part.save()
#self.imputation = Imputation(
# part = self.part,
# day = 13,
# hours = 5,
# project = self.project,
#)
#self.imputation.save()
def test_login_logout_ok(self):
self.c = Client()
response = self.c.post(reverse('auth-login'), {'username': self.user.username, 'password':'dummy'})
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(json_response['valid'], True)
token_auth = json_response['token_auth']
self.c = Client()
response = self.c.get(reverse('auth-logout'), {'token_auth': token_auth})
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(json_response['valid'], True)
def test_logout_invalid(self):
self.c = Client()
response = self.c.get(reverse('api:logout'))
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(json_response['valid'], False)
def test_project_list(self):
self.c = Client()
response = self.c.get(reverse('api:project-list'), {'token_auth': self.token_auth})
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(len(json_response['projects']), 1)
def test_part_list(self):
self.c = Client()
response = self.c.get(reverse('api:part-list'), {'token_auth': self.token_auth})
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(len(json_response['parts']), 1)
def test_imputation_list(self):
self.c = Client()
response = self.c.get(reverse('api:imputation-list'), {'token_auth': self.token_auth})
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(len(json_response['imputations']), 1)
def test_imputation_create(self):
self.c = Client()
response = self.c.post(reverse('api:imputation-add'), {'project': self.project.id, 'day':3, 'hours':5, 'part':self.part.id, 'token_auth': self.token_auth})
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(json_response['valid'], True)
self.assertEqual(json_response.has_key('id'), True)
id_imp = json_response['id']
#Invalid part
response = self.c.post(reverse('api:imputation-add'), {'project': self.project.id, 'day':3, 'hours':5, 'part':222, 'token_auth': self.token_auth})
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(json_response['valid'], False)
#Invalid day
response = self.c.post(reverse('api:imputation-add'), {'token_auth': self.token_auth, 'day':33, 'part':self.part.id, 'project': self.project.id})
|
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(json_response['valid'], False)
|
response = self.c.get(reverse('api:imputation-list'), {'token_auth': self.token_auth, 'day':3, 'part':self.part.id, 'project': self.project.id})
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(len(json_response['imputations']), 1)
response = self.c.get(reverse('api:imputation-list'), {'token_auth': self.token_auth, 'day':1, 'part':self.part.id, 'project': self.project.id})
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(len(json_response['imputations']), 0)
#Delete
response = self.c.get(reverse('api:imputation-delete', args=[id_imp]), {'token_auth': self.token_auth})
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(json_response['valid'], True)
response = self.c.get(reverse('api:imputation-list'), {'token_auth': self.token_auth, 'day':3, 'part':self.part.id, 'project': self.project.id})
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(len(json_response['imputations']), 0)
def test_part_creation(self):
self.c = Client()
response = self.c.post(reverse('api:part-add'), {'month': 3, 'year':2008, 'token_auth': self.token_auth})
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(json_response['valid'], True)
self.assertEqual(json_response.has_key('id'), True)
id_part = json_response['id']
response = self.c.get(reverse('api:part-list'), {'token_auth': self.token_auth})
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(len(json_response['parts']), 2)
parts = json_response['parts']
for part in parts:
if part['id'] == id_part:
self.assertEqual(part['state'], STATE_CREATED)
response = self.c.get(reverse('api:part-delete', args=[id_part]), {'token_auth': self.token_auth})
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(json_response['valid'], True)
response = self.c.get(reverse('api:part-list'), {'token_auth': self.token_auth})
self.assertEqual(response.status_code,200)
json_response = json.loads(response.content)
self.assertEqual(len(json_response['parts']), 1)
|
japsu/voitto
|
tappio/models.py
|
Python
|
gpl-3.0
| 2,774
| 0.001081
|
# Voitto - a simple yet efficient double ledger bookkeeping system
# Copyright (C) 2010 Santtu Pajukanta <santtu@pajukanta.fi>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
A Tappio ledger contains an account tree and a list of events. These are
encapsulated in the Document class.
Relationships between the classes in pseudo-UML:
Document 1 --> * Account
Document 1 --> * Event 1 --> * Entry 1 --> Account
"""
from datetime import date
import voitto
DEFAULT_IDENTITY = "Tappio"
DEFAULT_VERSION = "Voitto " + voitto.__version__
DEFAULT_BEGIN = date(2010, 1, 1)
DEFAULT_END = date(2010, 12, 31)
DEFAULT_INDENT = " "
class Document(object):
"""
Encapsulates a Tappio ledger.
A note about Document.accounts:
In Tappio, accounts are represented as forest of three trees. The
meanings of these trees are associated with the Finnish accounting
system. The first tree is always "vastaavaa" ("assets"), the second
is "vastattavaa" ("liabilities") and the third is "earnings" ("tulos").
"""
def __init__(self, identity=DEFAULT_IDENTITY, version=DEFAULT_VERSION,
name="", begin=DEFAULT_BEGIN, end=DEFAULT_END, accounts=None,
events=None):
self.identity = identity
self.version = version
self.name = name
self.begin = begin
self.end = end
self.accounts = accounts if accounts is not None else []
self.events = events if events is not None else []
class Account(object):
def __init__(self, number=None, name="", subaccounts=None, vat_type=None, vat_percent=None):
self.number = number
self.name = name
self.vat_type = vat_type
self.vat_percent = vat_percent
|
self.subaccounts = subaccounts if subaccounts is not None else []
class Event(object):
def __init__(self, number, date, description="", entries=None):
self.number = number
self.date = date
|
self.description = description
self.entries = entries if entries is not None else []
class Entry(object):
def __init__(self, account_number, cents):
self.account_number = account_number
self.cents = cents
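# Illustrative sketch (added, not part of the original module): building a
# minimal ledger along the Document -> Account / Event -> Entry relationships
# described in the module docstring. Account numbers and amounts are made up.
def _example_document():
    assets = Account(number=1, name="Vastaavaa",
                     subaccounts=[Account(number=101, name="Pankkitili")])
    liabilities = Account(number=2, name="Vastattavaa")
    earnings = Account(number=3, name="Tulos")
    event = Event(number=1, date=date(2010, 1, 15), description="Example event",
                  entries=[Entry(account_number=101, cents=1000),
                           Entry(account_number=3, cents=-1000)])
    return Document(name="Example ledger",
                    accounts=[assets, liabilities, earnings], events=[event])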
|
chop-dbhi/varify-data-warehouse
|
vdw/assessments/migrations/0007_auto__add_unique_assessment_user_sample_result.py
|
Python
|
bsd-2-clause
| 20,310
| 0.008567
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'Assessment', fields ['user', 'sample_result']
db.create_unique('assessment', ['user_id', 'sample_result_id'])
def backwards(self, orm):
# Removing unique constraint on 'Assessment', fields ['user', 'sample_result']
db.delete_unique('assessment', ['user_id', 'sample_result_id'])
models = {
'assessments.assessment': {
'Meta': {'unique_together': "(('sample_result', 'user'),)", 'object_name': 'Assessment', 'db_table': "'assessment'"},
'assessment_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessments.AssessmentCategory']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'evidence_details': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'father_result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'father'", 'to': "orm['assessments.ParentalResult']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
|
'mother_result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mother'", 'to': "orm['assessments.ParentalResult']"}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pathogenicity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessments.Pathogenicity']"}),
|
'sample_result': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['samples.Result']"}),
'sanger_requested': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sanger_result': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['assessments.SangerResult']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'assessments.assessmentcategory': {
'Meta': {'object_name': 'AssessmentCategory', 'db_table': "'assessment_category'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'assessments.parentalresult': {
'Meta': {'object_name': 'ParentalResult', 'db_table': "'parental_result'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'assessments.pathogenicity': {
'Meta': {'object_name': 'Pathogenicity', 'db_table': "'pathogenicity'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'})
},
'assessments.sangerresult': {
'Meta': {'object_name': 'SangerResult', 'db_table': "'sanger_result'"},
'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'genome.chromosome': {
'Meta': {'ordering': "['order']", 'object_name': 'Chromosome', 'db_table': "'chromosome'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'})
},
'genome.genotype': {
'Meta': {'object_name': 'Genotype', 'db_table': "'genotype'"},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '3'})
},
'literature.pubmed': {
'Meta': {'object_name': 'PubMed', 'db_table': "'pubmed'"},
'pmid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'})
},
'phenotypes.phenotype': {
'Meta': {'object_name': 'Phenotype', 'db_table': "'phenotype'"},
'articles': ('django
|
zcarwile/quixotic_webapp
|
quixotic_webapp/settings.py
|
Python
|
gpl-3.0
| 3,517
| 0.001137
|
"""
Django settings for quixotic_webapp project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# DB parameters in this file
from . import parameters
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yh3$*6egtz79m@0(g!0txzr2rt2#xg852ne9cre&a3=twv#oc('
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG = False
DEBUG = True
ALLOWED_HOSTS = [
u'ec2-54-173-30-19.compute-1.amazonaws.com',
u'54.173.30.19',
]
# Application definition
INSTALLED_APPS = [
'quixotic_api.apps.QuixoticApiConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
]
|
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
|
ROOT_URLCONF = 'quixotic_webapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'quixotic_webapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': parameters.DB_NAME,
'USER': parameters.DB_USER,
'PASSWORD': parameters.DB_PASSWORD,
'HOST': parameters.DB_HOST,
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Note: the trailing comma keeps this a tuple rather than a plain string.
CORS_ORIGIN_WHITELIST = (
    'localhost:8888',
)
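# Illustrative sketch, not part of the original project: the `parameters`
# module imported at the top is assumed to define the database credentials
# referenced in DATABASES above; all values shown here are hypothetical.
#
#   # quixotic_webapp/parameters.py
#   DB_NAME = 'quixotic'
#   DB_USER = 'quixotic_user'
#   DB_PASSWORD = 'change-me'
#   DB_HOST = 'localhost'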
|
dhuang/incubator-airflow
|
airflow/timetables/interval.py
|
Python
|
apache-2.0
| 3,585
| 0.000558
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
from typing import Any, Optional
from pendulum import DateTime
from airflow.timetables.base import DagRunInfo, TimeRestriction, Timetable
from airflow.timetables.schedules import CronSchedule, Delta, DeltaSchedule, Schedule
class _DataIntervalTimetable(Timetable):
"""Basis for timetable implementations that schedule data intervals.
    Timetables of this kind create periodic data intervals from an underlying
    schedule representation (e.g. a cron expression or a timedelta instance),
    and schedule a DagRun at the end of each interval.
"""
_schedule: Schedule
def __eq__(self, other: Any) -> bool:
"""Delegate to the schedule."""
if not isinstance(other, _DataIntervalTimetable):
return NotImplemented
return self._schedule == other._schedule
def validate(self) -> None:
self._schedule.validate()
def next_dagrun_info(
self,
last_automated_dagrun: Optional[DateTime],
restriction: TimeRestriction,
) -> Optional[DagRunInfo]:
earliest = restriction.earliest
if not restriction.catchup:
earliest = self._schedule.skip_to_latest(earliest)
if last_automated_dagrun is None:
# First run; schedule the run at the first available time matching
# the schedule, and retrospectively create a data interval for it.
if earliest is None:
return None
start = self._schedule.align(earliest)
else:
            # There's a previous run. Create a data interval starting from
            # the end of the previous interval.
start = self._schedule.get_next(last_automated_dagrun)
if restriction.latest is not None and start > restriction.latest:
return None
end = self._schedule.get_next(start)
return Da
|
gRunInfo.interval(start=start, end=end)
class CronDataIntervalTimetable(_DataIntervalTimetable):
"""Timetable that schedules data intervals with a cron expression.
This corresponds to ``schedule_interval=<cron>``, where ``<cron>`` is either
|
a five/six-segment representation, or one of ``cron_presets``.
Don't pass ``@once`` in here; use ``OnceTimetable`` instead.
"""
def __init__(self, cron: str, timezone: datetime.tzinfo) -> None:
self._schedule = CronSchedule(cron, timezone)
class DeltaDataIntervalTimetable(_DataIntervalTimetable):
"""Timetable that schedules data intervals with a time delta.
This corresponds to ``schedule_interval=<delta>``, where ``<delta>`` is
either a ``datetime.timedelta`` or ``dateutil.relativedelta.relativedelta``
instance.
"""
def __init__(self, delta: Delta) -> None:
self._schedule = DeltaSchedule(delta)
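# Illustrative usage sketch, not part of the original module: constructs the
# two concrete timetables defined above. Assumes pendulum's ``timezone``
# helper; the cron expression and delta are arbitrary example values.
if __name__ == "__main__":
    from pendulum import timezone

    cron_timetable = CronDataIntervalTimetable("0 0 * * *", timezone("UTC"))
    delta_timetable = DeltaDataIntervalTimetable(datetime.timedelta(days=1))
    # Both delegate validation to their underlying schedule objects.
    cron_timetable.validate()
    delta_timetable.validate()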
|
jpelias/pyTelegramBotAPI
|
tests/test_telebot.py
|
Python
|
gpl-2.0
| 7,077
| 0.002685
|
# -*- coding: utf-8 -*-
import sys
sys.path.append('../')
import time
import pytest
import os
import telebot
from telebot import types
from telebot import util
should_skip = 'TOKEN' not in os.environ or 'CHAT_ID' not in os.environ
if not should_skip:
TOKEN = os.environ['TOKEN']
CHAT_ID = os.environ['CHAT_ID']
@pytest.mark.skipif(should_skip, reason="No environment variables configured")
class TestTeleBot:
def test_message_listener(self):
msg_list = []
for x in range(100):
msg_list.append(self.create_text_message('Message ' + str(x)))
def listener(messages):
assert len(messages) == 100
tb = telebot.TeleBot('')
tb.set_update_listener(listener)
def test_message_handler(self):
tb = telebot.TeleBot('')
msg = self.create_text_message('/help')
@tb.message_handler(commands=['help', 'start'])
def command_handler(message):
message.text = 'got'
tb.process_new_messages([msg])
time.sleep(1)
assert msg.text == 'got'
def test_message_handler_reg(self):
bot = telebot.TeleBot('')
msg = self.create_text_message(r'https://web.telegram.org/')
@bot.message_handler(regexp='((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)')
def command_url(message):
msg.text = 'got'
bot.process_new_messages([msg])
time.sleep(1)
assert msg.text == 'got'
def test_message_handler_lambda(self):
bot = telebot.
|
TeleBot('')
msg = self.create_text_message(r'lambda_text')
@bot.message_hand
|
ler(func=lambda message: r'lambda' in message.text)
def command_url(message):
msg.text = 'got'
bot.process_new_messages([msg])
time.sleep(1)
assert msg.text == 'got'
def test_message_handler_lambda_fail(self):
bot = telebot.TeleBot('')
msg = self.create_text_message(r'text')
@bot.message_handler(func=lambda message: r'lambda' in message.text)
def command_url(message):
msg.text = 'got'
bot.process_new_messages([msg])
time.sleep(1)
assert not msg.text == 'got'
def test_message_handler_reg_fail(self):
bot = telebot.TeleBot('')
msg = self.create_text_message(r'web.telegram.org/')
@bot.message_handler(regexp='((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)')
def command_url(message):
msg.text = 'got'
bot.process_new_messages([msg])
time.sleep(1)
assert not msg.text == 'got'
def test_send_message_with_markdown(self):
tb = telebot.TeleBot(TOKEN)
markdown = """
*bold text*
_italic text_
[text](URL)
"""
ret_msg = tb.send_message(CHAT_ID, markdown, parse_mode="Markdown")
assert ret_msg.message_id
def test_send_file(self):
file_data = open('../examples/detailed_example/kitten.jpg', 'rb')
tb = telebot.TeleBot(TOKEN)
ret_msg = tb.send_document(CHAT_ID, file_data)
assert ret_msg.message_id
ret_msg = tb.send_document(CHAT_ID, ret_msg.document.file_id)
assert ret_msg.message_id
def test_send_video(self):
file_data = open('./test_data/test_video.mp4', 'rb')
tb = telebot.TeleBot(TOKEN)
ret_msg = tb.send_video(CHAT_ID, file_data)
assert ret_msg.message_id
def test_send_video_more_params(self):
file_data = open('./test_data/test_video.mp4', 'rb')
tb = telebot.TeleBot(TOKEN)
ret_msg = tb.send_video(CHAT_ID, file_data, 1)
assert ret_msg.message_id
def test_send_file_exception(self):
tb = telebot.TeleBot(TOKEN)
try:
tb.send_document(CHAT_ID, None)
assert False
except Exception as e:
print(e)
assert True
def test_send_photo(self):
file_data = open('../examples/detailed_example/kitten.jpg', 'rb')
tb = telebot.TeleBot(TOKEN)
ret_msg = tb.send_photo(CHAT_ID, file_data)
assert ret_msg.message_id
ret_msg = tb.send_photo(CHAT_ID, ret_msg.photo[0].file_id)
assert ret_msg.message_id
def test_send_audio(self):
file_data = open('./test_data/record.mp3', 'rb')
tb = telebot.TeleBot(TOKEN)
ret_msg = tb.send_audio(CHAT_ID, file_data, 1, 'eternnoir', 'pyTelegram')
assert ret_msg.content_type == 'audio'
assert ret_msg.audio.performer == 'eternnoir'
assert ret_msg.audio.title == 'pyTelegram'
def test_send_voice(self):
file_data = open('./test_data/record.ogg', 'rb')
tb = telebot.TeleBot(TOKEN)
ret_msg = tb.send_voice(CHAT_ID, file_data)
assert ret_msg.voice.mime_type == 'audio/ogg'
def test_get_file(self):
file_data = open('./test_data/record.ogg', 'rb')
tb = telebot.TeleBot(TOKEN)
ret_msg = tb.send_voice(CHAT_ID, file_data)
file_id = ret_msg.voice.file_id
file_info = tb.get_file(file_id)
assert file_info.file_id == file_id
def test_send_message(self):
text = 'CI Test Message'
tb = telebot.TeleBot(TOKEN)
ret_msg = tb.send_message(CHAT_ID, text)
assert ret_msg.message_id
def test_forward_message(self):
text = 'CI forward_message Test Message'
tb = telebot.TeleBot(TOKEN)
msg = tb.send_message(CHAT_ID, text)
ret_msg = tb.forward_message(CHAT_ID, CHAT_ID, msg.message_id)
assert ret_msg.forward_from
def test_reply_to(self):
text = 'CI reply_to Test Message'
tb = telebot.TeleBot(TOKEN)
msg = tb.send_message(CHAT_ID, text)
ret_msg = tb.reply_to(msg, text + ' REPLY')
assert ret_msg.reply_to_message.message_id == msg.message_id
def test_register_for_reply(self):
text = 'CI reply_to Test Message'
tb = telebot.TeleBot(TOKEN)
msg = tb.send_message(CHAT_ID, text, reply_markup=types.ForceReply())
reply_msg = tb.reply_to(msg, text + ' REPLY')
def process_reply(message):
assert msg.message_id == message.reply_to_message.message_id
tb.register_for_reply(msg, process_reply)
tb.process_new_messages([reply_msg])
def test_send_location(self):
tb = telebot.TeleBot(TOKEN)
lat = 26.3875591
lon = -161.2901042
ret_msg = tb.send_location(CHAT_ID, lat, lon)
assert int(ret_msg.location.longitude) == int(lon)
assert int(ret_msg.location.latitude) == int(lat)
def create_text_message(self, text):
params = {'text': text}
chat = types.User(11, 'test')
return types.Message(1, None, None, chat, 'text', params)
def test_is_string_unicode(self):
s1 = u'string'
assert util.is_string(s1)
def test_is_string_string(self):
s1 = 'string'
assert util.is_string(s1)
def test_not_string(self):
i1 = 10
assert not util.is_string(i1)
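# Illustrative note, not part of the original file: the live-API tests above
# run only when both environment variables checked by the skipif marker are
# set, e.g. (placeholder values are hypothetical):
#
#   TOKEN=<bot token> CHAT_ID=<chat id> pytest tests/test_telebot.py
#
# Without them, the whole class is skipped.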
|
uskudnik/ggrc-core
|
src/tests/ggrc_workflows/notifications/test_enable_disable_notifications.py
|
Python
|
apache-2.0
| 7,378
| 0.010165
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import random
from tests.ggrc import TestCase
from freezegun import freeze_time
from datetime import datetime
from mock import patch
import os
from ggrc import notification
from ggrc.models import NotificationConfig, Notification, Person
from tests.ggrc_workflows.generator import WorkflowsGenerator
from tests.ggrc.api_helper import Api
from tests.ggrc.generator import GgrcGenerator
if os.environ.get('TRAVIS', False):
random.seed(1) # so we can reproduce the tests if needed
class TestEnableAndDisableNotifications(TestCase):
""" This class contains simple one time workflow tests that are not
in the gsheet test grid
"""
def setUp(self):
TestCase.setUp(self)
self.api = Api()
self.wf_generator = WorkflowsGenerator()
self.ggrc_generator = GgrcGenerator()
Notification.query.delete()
self.random_objects = self.ggrc_generator.generate_random_objects(2)
_, self.user = self.ggrc_generator.generate_person(user_role="gGRC Admin")
self.create_test_cases()
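    # The decorator below monkey-patches Notification.__init__ so that
    # created_at is stamped with the current (possibly frozen) time whenever
    # a Notification is created, letting the freeze_time blocks in the tests
    # control notification dates.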
def init_decorator(init):
def new_init(self, *args, **kwargs):
init(self, *args, **kwargs)
if hasattr(self, "created_at"):
self.created_at = datetime.now()
return new_init
Notification.__init__ = init_decorator(Notification.__init__)
@patch("ggrc.notification.email.send_email")
  def test_default_notifications_settings(self, mock_mail):
with freeze_time("2015-02-01 13:39:20"):
_, wf = self.wf_generator.generate_workflow(self.quarterly_wf)
response, wf = self.wf_generator.activate_workflow(wf)
self.assert200(response)
user = Person.query.get(self.user.id)
with freeze_time("2015-01-01 13:39:20"):
_, notif_data = notification.get_todays_notifications()
self.assertNotIn(user.email, notif_data)
with freeze_time("2015-01-29 13:39:20"):
_, notif_data = notification.get_todays_notifications()
self.assertIn(user.email, notif_data)
@patch("ggrc.notification.email.send_email")
def test_disabled_notifications(self, mock_mail):
with freeze_time("2015-02-01 13:39:20"):
_, wf = self.wf_generator.generate_workflow(self.quarterly_wf)
response, wf = self.wf_generator.activate_workflow(wf)
self.assert200(response)
self.ggrc_generator.generate_notification_setting(
self.user.id, "Email_Digest", False)
user = Person.query.get(self.user.id)
with freeze_time("2015-01-01 13:39:20"):
_, notif_data = notification.get_todays_notifications()
self.assertNotIn(u
|
ser.email, notif_data)
with freeze_time("20
|
15-01-29 13:39:20"):
_, notif_data = notification.get_todays_notifications()
self.assertNotIn(user.email, notif_data)
@patch("ggrc.notification.email.send_email")
def test_enabled_notifications(self, mock_mail):
with freeze_time("2015-02-01 13:39:20"):
_, wf = self.wf_generator.generate_workflow(self.quarterly_wf)
response, wf = self.wf_generator.activate_workflow(wf)
self.assert200(response)
with freeze_time("2015-01-29 13:39:20"):
user = Person.query.get(self.user.id)
_, notif_data = notification.get_todays_notifications()
self.assertIn(user.email, notif_data)
self.ggrc_generator.generate_notification_setting(
self.user.id, "Email_Digest", True)
user = Person.query.get(self.user.id)
_, notif_data = notification.get_todays_notifications()
self.assertIn(user.email, notif_data)
@patch("ggrc.notification.email.send_email")
def test_forced_notifications(self, mock_mail):
with freeze_time("2015-02-01 13:39:20"):
_, wf = self.wf_generator.generate_workflow(self.quarterly_wf_forced)
response, wf = self.wf_generator.activate_workflow(wf)
self.assert200(response)
user = Person.query.get(self.user.id)
with freeze_time("2015-01-29 13:39:20"):
_, notif_data = notification.get_todays_notifications()
self.assertIn(user.email, notif_data)
self.ggrc_generator.generate_notification_setting(
self.user.id, "Email_Digest", True)
user = Person.query.get(self.user.id)
_, notif_data = notification.get_todays_notifications()
self.assertIn(user.email, notif_data)
@patch("ggrc.notification.email.send_email")
def test_force_one_wf_notifications(self, mock_mail):
with freeze_time("2015-02-01 13:39:20"):
_, wf_forced = self.wf_generator.generate_workflow(self.quarterly_wf_forced)
response, wf_forced = self.wf_generator.activate_workflow(wf_forced)
_, wf = self.wf_generator.generate_workflow(self.quarterly_wf)
response, wf = self.wf_generator.activate_workflow(wf)
self.assert200(response)
user = Person.query.get(self.user.id)
with freeze_time("2015-01-29 13:39:20"):
_, notif_data = notification.get_todays_notifications()
self.assertIn(user.email, notif_data)
self.assertIn("cycle_starts_in", notif_data[user.email])
self.assertIn(wf_forced.id, notif_data[user.email]["cycle_starts_in"])
self.assertIn(wf.id, notif_data[user.email]["cycle_starts_in"])
self.ggrc_generator.generate_notification_setting(
self.user.id, "Email_Digest", False)
user = Person.query.get(self.user.id)
_, notif_data = notification.get_todays_notifications()
self.assertIn(user.email, notif_data)
self.assertIn("cycle_starts_in", notif_data[user.email])
self.assertIn(wf_forced.id, notif_data[user.email]["cycle_starts_in"])
self.assertNotIn(wf.id, notif_data[user.email]["cycle_starts_in"])
def create_test_cases(self):
def person_dict(person_id):
return {
"href": "/api/people/%d" % person_id,
"id": person_id,
"type": "Person"
}
self.quarterly_wf_forced = {
"title": "quarterly wf forced notification",
"notify_on_change": True,
"description": "",
"owners": [person_dict(self.user.id)],
"frequency": "quarterly",
"task_groups": [{
"title": "tg_1",
"contact": person_dict(self.user.id),
"task_group_tasks": [{
"contact": person_dict(self.user.id),
"description": self.wf_generator.random_str(100),
"relative_start_day": 5,
"relative_start_month": 2,
"relative_end_day": 25,
"relative_end_month": 2,
},
],
},
]
}
self.quarterly_wf = {
"title": "quarterly wf 1",
"description": "",
"owners": [person_dict(self.user.id)],
"frequency": "quarterly",
"task_groups": [{
"title": "tg_1",
"contact": person_dict(self.user.id),
"task_group_tasks": [{
"contact": person_dict(self.user.id),
"description": self.wf_generator.random_str(100),
"relative_start_day": 5,
"relative_start_month": 2,
"relative_end_day": 25,
"relative_end_month": 2,
},
],
},
]
}
|
bdaroz/the-blue-alliance
|
helpers/location_helper.py
|
Python
|
mit
| 24,876
| 0.003136
|
import json
import logging
import math
import re
import tba_config
import urllib
from difflib import SequenceMatcher
from google.appengine.api import memcache, urlfetch
from google.appengine.ext import ndb
from models.location import Location
from models.sitevar import Sitevar
from models.team import Team
class LocationHelper(object):
GOOGLE_API_KEY = None
@classmethod
def get_similarity(cls, a, b):
"""
Returns max(similarity between two strings ignoring case,
similarity between two strings ignoring case and order,
similarity between acronym(a) & b,
similarity between a & acronym(b)) from 0 to 1
where acronym() is generated by splitting along non word characters
Ignores case and order
"""
a = a.lower().strip()
b = b.lower().strip()
a_split = filter(lambda x: x, re.split('\s+|,|-', a))
b_split = filter(lambda x: x, re.split('\s+|,|-', b))
a_sorted = ' '.join(sorted(a_split))
b_sorted = ' '.join(sorted(b_split))
a_acr = ''.join([w[0] if w else '' for w in a_split]).lower()
b_acr = ''.join([w[0] if w else '' for w in b_split]).lower()
sm1 = SequenceMatcher(None, a, b)
sm2 = SequenceMatcher(None, a_sorted, b_sorted)
sm3 = SequenceMatcher(None, a_acr, b)
sm4 = SequenceMatcher(None, a, b_acr)
return max([
sm1.ratio(),
sm2.ratio(),
sm3.ratio(),
sm4.ratio(),
])
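    # Illustrative example, not part of the original file:
    #   LocationHelper.get_similarity('FIRST Championship', 'Championship FIRST')
    # returns 1.0, because the order-insensitive comparison of the sorted
    # tokens matches exactly even though the raw strings differ.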
@classmethod
def update_event_location(cls, event):
if not event.location:
return
if event.normalized_location: # Only set normalized_location once
return
location_info, score = cls.get_event_location_info(event)
# Log performance
text = "Event {} location score: {}".format(event.key.id(), score)
if score < 0.8:
logging.warning(text)
else:
logging.info(text)
# Fallback to location only
if not location_info:
logging.warning("Falling back to location only for event {}".format(event.key.id()))
geocode_result = cls.google_maps_geocode_async(event.location).get_result()
if geocode_result:
location_info = cls.construct_location_info_async(geocode_result[0]).get_result()
else:
logging.warning("Event {} location failed!".format(event.key.id()))
# Update event
event.normalized_location = cls.build_normalized_location(location_info)
@classmethod
def get_event_location_info(cls, event):
"""
Search for different combinations of venue, venue_address, city,
state_prov, postalcode, and country in attempt to find the correct
location associated with the event.
"""
        # Possible queries for the location that may yield matching results
if event.venue_address:
possible_queries = [event.venue_address.replace('\n', ' ')]
else:
possible_queries = []
if event.venue:
possible_queries.append(event.venue)
if event.venue_address:
split_address = event.venue_address.split('\n')
# Venue takes up at most 2 lines. Isolate address
possible_queries.append(' '.join(split_address[1:]))
possible_queries.append(' '.join(split_address[2:]))
# Geocode for lat/lng
lat_lng = cls.get_lat_lng(event.location)
if not lat_lng:
return {}, 0
# Try to find place based on possible queries
best_score = 0
best_location_info = {}
nearbysearch_results_candidates = [] # More trustworthy candidates are added first
for j, query in enumerate(possible_queries):
# Try both searches
nearbysearch_places = cls.google_maps_placesearch_async(query, lat_lng)
textsearch_places = cls.google_maps_placesearch_async(query, lat_lng, textsearch=True)
for results_future in [nearbysearch_places, textsearch_places]:
for i, place in enumerate(results_future.get_result()[:5]):
location_info = cls.construct_location_info_async(place).get_result()
score = cls.compute_event_location_score(query, location_info, lat_lng)
score *= pow(0.7, j) * pow(0.7, i) # discount by ranking
if score == 1:
return location_info, score
elif score > best_score:
best_location_info = location_info
best_score = score
return best_location_info, best_score
@classmethod
def compute_event_location_score(cls, query_name, location_info, lat_lng):
"""
Score for correctness. 1.0 is perfect.
Not checking for absolute equality in case of existing data errors.
"""
# TODO FIX: Hacky special case for weird event. 2017-01-18 -fangeugene
if 'Shenzhen' in query_name and location_info['name'] != 'Shenzhen University Town Sports Center':
return 0
# Check radius
R = 6373.0 # approximate radius of earth in km
lat1 = math.radians(lat_lng[0])
lon1 = math.radians(lat_lng[1])
lat2 = math.radians(location_info['lat'])
lon2 = math.radians(location_info['lng'])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2)**2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
distance = R * c
if distance > 100:
return 0
if {'point_of_interest', 'premise'}.intersection(set(location_info.get('types', ''))):
score = pow(max(
cls.get_similarity(query_name, location_info['name']),
cls.get_similarity(query_name, location_info['formatted_address'])), 1.0/3)
else:
score = 0
return score
@classmethod
def update_team_location(cls, team):
if not team.location:
return
# # Try with and without textsearch, pick best
# location_info, score = cls.get_team_location_info(team)
# if score < 0.7:
# logging.warning("Using textsearch for {}".format(team.key.id()))
# location_info2, score2 = cls.get_team_location_info(team, textsearch=True)
# if score2 > score:
# location_info = location_info2
# score = score2
# # Log performance
# text = "Team {} location score: {}".format(team.key.id(), score)
# if score < 0.8:
# logging.warning(text)
# else:
# logging.info(text)
# # Don't trust anything below a certain threshold Super strict for now.
# if score < 0.9:
# logging.warning("Location score too low for team {}".format(team.key.id()))
# location_info = {}
location_info = {} # Force imprecise locations
# Fallback to location only
if n
|
ot location_info:
# logging.warning("Falling back to location only for team {}".format(team.key.id()))
geocode_result = cls.google_maps_geocode_async(team.location).get_result()
if geocode_result:
location_info = cls.construct_location_info_async(geocode_result[0], auto_fill=False).get_result()
# Fallback to city, country
if not location_info:
logging.warning("Falling bac
|
k to city/country only for team {}".format(team.key.id()))
city_country = u'{} {}'.format(
team.city if team.city else '',
team.country if team.country else '')
geocode_result = cls.google_maps_geocode_async(city_country).get_result()
if geocode_result:
location_info = cls.construct_location_info_async(geocode_result[0], auto_fill=False).get_result()
else:
logging.warning("Team {} location failed!".format
|
benjspriggs/tumb-borg
|
tumb_borg/process.py
|
Python
|
apache-2.0
| 1,044
| 0.01341
|
#!/usr/bin/env python3
delineator = "//"
hashtag = "#"
# generate poems from a file
# out: yields one list of lines per poem
def generate_poems(filename):
g = []
# get to the first poem in the file
with open(filename, 'r') as f:
for line in f:
line = line.rstrip()
if line
|
.startswith( delineator ) and g:
yield g
g = []
if line:
g.append(line)
yield g
#
|
convert a list of strings
# into a poem dictionary
def to_dictionary(poem_lines):
d = {}
d['content'] = []
d['tags'] = []
tags = []
for line in poem_lines:
if line.startswith( delineator ):
d['title'] = line.lstrip( delineator ).strip()
elif line.startswith( hashtag ):
tags.append(line)
else:
d['content'].append(line) # do not strip to preserve indentation
for line in tags:
for tag in \
(t.strip() for t in line.split( hashtag ) if t):
d['tags'].append(tag)
return d
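# Illustrative usage sketch, not part of the original module: writes a small
# poem file in the format described above and prints the parsed dictionary.
if __name__ == "__main__":
    import os
    import tempfile

    sample = "// My Title\nfirst line\nsecond line\n#tag1 #tag2\n"
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
        tmp.write(sample)
    for poem in generate_poems(tmp.name):
        # Expected output (roughly):
        # {'content': ['first line', 'second line'], 'tags': ['tag1', 'tag2'], 'title': 'My Title'}
        print(to_dictionary(poem))
    os.unlink(tmp.name)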
|
wolfgangz2013/rt-thread
|
bsp/at91sam9g45/rtconfig.py
|
Python
|
apache-2.0
| 3,724
| 0.008861
|
import os
ARCH = 'arm'
CPU = 'arm926'
# toolchains options
CROSS_TOOL = 'gcc'
#------- toolchains path -------------------------------------------------------
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC'
|
)
if CROSS_TOOL == 'gcc':
PLATFORM = 'g
|
cc'
EXEC_PATH = r'D:\arm-2013.11\bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = 'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = 'C:/Program Files (x86)/IAR Systems/Embedded Workbench 7.2'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
#BUILD = 'debug'
BUILD = 'release'
CORE = 'arm926ej-s'
MAP_FILE = 'rtthread_at91sam9g45.map'
LINK_FILE = 'link_scripts/at91sam9g45_ram'
TARGET_NAME = 'rtthread.bin'
#------- GCC settings ----------------------------------------------------------
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=arm926ej-s'
CFLAGS = DEVICE
AFLAGS = '-c'+ DEVICE + ' -x assembler-with-cpp'
AFLAGS += ' -Iplatform'
LFLAGS = DEVICE
LFLAGS += ' -Wl,--gc-sections,-cref,-Map=' + MAP_FILE
LFLAGS += ' -T ' + LINK_FILE + '.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET ' + TARGET_NAME + '\n'
POST_ACTION += SIZE + ' $TARGET\n'
#------- Keil settings ---------------------------------------------------------
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
EXEC_PATH += '/arm/armcc/bin/'
DEVICE = ' --cpu=' + CORE
CFLAGS = DEVICE + ' --apcs=interwork --diag_suppress=870'
AFLAGS = DEVICE + ' -Iplatform'
LFLAGS = DEVICE + ' --strict'
LFLAGS += ' --info sizes --info totals --info unused --info veneers'
LFLAGS += ' --list ' + MAP_FILE
LFLAGS += ' --scatter ' + LINK_FILE + '.scat'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output ' + TARGET_NAME + ' \n'
POST_ACTION += 'fromelf -z $TARGET\n'
#------- IAR settings ----------------------------------------------------------
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = CORE
CFLAGS = '--cpu=' + DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' -e'
CFLAGS += ' --fpu=none'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = '--cpu '+ DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --fpu none'
AFLAGS += ' -S'
AFLAGS += ' -Iplatform'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = '--config ' + LINK_FILE +'.icf'
LFLAGS += ' --entry __iar_program_start'
LFLAGS += ' --map ' + MAP_FILE
LFLAGS += ' --silent'
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --silent --bin $TARGET ' + TARGET_NAME
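# Illustrative note, not part of the original file: this script is consumed by
# RT-Thread's SCons build. The toolchain and its path can typically be
# overridden from the environment before building, e.g. (paths hypothetical):
#
#   export RTT_CC=gcc
#   export RTT_EXEC_PATH=/opt/gcc-arm-none-eabi/bin
#   scons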
|
kaankizilagac/sozluk
|
sozluk/topics/signals.py
|
Python
|
mit
| 412
| 0.002427
|
#
|
-*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
def check_junior(sender, instance, created, **kwargs):
    # from .models import Entry  # avoid circular import
if created and instance.user.junior:
total_entry = sender.objects.filt
|
er(user=instance.user).count()
if total_entry >= 2:
instance.user.junior = False
instance.user.save()
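# Illustrative only, not part of the original file: a handler with this
# (sender, instance, created, **kwargs) signature is typically wired to
# Django's post_save signal, assuming the Entry model hinted at in the
# comment above, e.g.:
#
#   from django.db.models.signals import post_save
#   from .models import Entry
#
#   post_save.connect(check_junior, sender=Entry)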
|