repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
czcorpus/kontext | lib/plugins/mysql_subcmixer/metadata_model.py | Python | gpl-2.0 | 10,170 | 0.003147 | # Copyright (c) 2022 Institute of the Czech National Corpus
# Copyright (c) 2022 Martin Zimandl <martin.zimandl@gmail.com>
# Copyright (c) 2015 Martin Stepan <martin.stepan@ff.cuni.cz>,
# Tomas Machalek <tomas.machalek@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from typing import Optional, Set, Tuple, List, Dict
import numpy as np
import pulp
from mysql.connector.connection import MySQLConnection
from mysql.connector.cursor import MySQLCursor
from plugins.abstract.integration_db import IntegrationDatabase
from .category_tree import CategoryTree, CategoryTreeNode
class CorpusComposition(object):
def __init__(
self, status: Optional[str], variables: List[int], size_assembled: int,
category_sizes: List[int], used_bounds: List[int], num_texts: Optional[int] = None):
self.status = status
self.variables = variables
self.size_assembled = size_assembled
self.category_sizes = category_sizes
self.num_texts = num_texts
self.used_bounds = used_bounds
def __repr__(self):
return 'CorpusComposition(status: %s, size: %s, num_texts: %s, num_vars: %s)' % (
self.status, self.size_assembled, self.num_texts, len(self.variables)
if self.variables is not None else None)
class MetadataModel:
"""
This class represents the linear optimization model for given categoryTree.
arguments:
meta_db -- a Database instance
category_tree -- a tree holding the user input
id_attr -- an unique identifier of a 'bibliography' item (defined in corpora.xml).
"""
def __init__(
self, db: IntegrationDatabase[MySQLConnection, MySQLCursor], category_tree: CategoryTree, id_attr: str):
self._db = db
self.category_tree = category_tree
self._id_struct, self._id_attr = id_attr.split('.')
self.text_sizes, self._id_map = self._get_text_sizes()
# text_sizes and _id_map both contain all the documents from the corpus
# no matter whether they have matching aligned counterparts
self.num_texts = len(self.text_sizes)
self.b = np.zeros(self.category_tree.num_categories - 1) # required sizes, bounds
self.A = np.zeros((self.category_tree.num_categories, self.num_texts))
used_ids: Set[int] = set()
self._init_ab(self.category_tree.root_node, used_ids)
# for items without aligned counterparts we create
# conditions fulfillable only for x[i] = 0
self._init_ab_nonalign(used_ids)
def _get_text_sizes(self) -> Tuple[List[int], Dict[int, int]]:
"""
List all the texts matching main corpus. This will be the
base for the 'A' matrix in the optimization problem.
In case we work with aligned corpora we still want
the same result here as the non-aligned items from
the primary corpus will not be selected in
_init_ab() due to applied self JOIN
(append_aligned_corp_sql())
Also generate a map "db_ID -> row index" to be able
to work with db-fetched subsets of the texts and
matching them with the 'A' matrix (i.e. in a filtered
result a record has a different index then in
all the records list).
"""
sql = f'''
SELECT MIN(t_tuple.id) AS db_id, SUM(t_tuple.poscount) AS poscount
FROM corpus_structattr_value AS t_value
JOIN corpus_structattr_value_mapping AS t_map ON t_map.value_id = t_value.id
JOIN corpus_structattr_value_tuple AS t_tuple ON t_tuple.id = t_map.value_tuple_id
WHERE t_value.corpus_name = %s AND t_value.structure_name = %s AND t_value.structattr_name = %s
GROUP BY t_value.value
ORDER BY db_id
'''
sizes = []
id_map = {}
with self._db.cursor() as cursor:
cursor.execute(sql, (self.category_tree.corpus_id, self._id_struct, self._id_attr))
for i, row in enumerate(cursor):
sizes.append(int(row['poscount']))
id_map[row['db_id']] = i
return sizes, id_map
def _init_ab_nonalign(self, used_ids: Set[int]) -> None:
# Now we process items with no aligned counterparts.
# In this case we must define a condition which will be
# fulfilled if X[i] == 0
for k, v in self._id_map.items():
if k not in used_ids:
for i in range(1, len(self.b)):
self.A[i][v] = self.b[i] * 2 if self.b[i] > 0 else 10000
def _init_ab(self, node: CategoryTreeNode, used_ids: Set[int]) -> None:
"""
Initialization method for coefficient matrix (A) and vector of bounds (b)
Recursively traverses all nodes of given categoryTree starting from its root.
Each node is processed in order to generate one inequality constraint.
args:
node -- currently processed node of the categoryTree
used_ids -- a set of ids used in previous nodes
"""
if node.metadata_condition is not None:
sql_items = [
f'''
SELECT t_map.value_tuple_id
FROM corpus_structattr_value AS t_value
JOIN corpus_structattr_value_mapping AS t_map ON t_map.value_id = t_value.id
WHERE t_value.corpus_name = %s AND t_value.structure_name = %s AND t_value.structattr_name = %s
AND t_value.value {mc.mysql_op} %s
'''
for subl in node.metadata_condition
for mc in subl
]
# using SQL 'INTERSECT would be better here but it is quite a new feature so let's keep this one for now
un_all = ' UNION ALL '.join(sql_items)
sql_intersect = f'SELECT tmp.value_tuple_id, COUNT(*) AS num FROM ({un_all}) AS tmp GROUP BY ' \
f'tmp.value_tuple_id HAVING num = {len(sql_items)} '
aligned_join = [
f'INNER JOIN corpus_structattr_value_tuple AS a{i} ON a{i}. | corpus_name = %s AND a{i}.item_id | = t_tuple.item_id'
for i in range(len(self.category_tree.aligned_corpora))
]
sql = f'''
SELECT MIN(tuple_ids.value_tuple_id) AS db_id, SUM(t_tuple.poscount) AS poscount
FROM (
{sql_intersect}
) as tuple_ids
JOIN corpus_structattr_value_mapping AS t_map ON t_map.value_tuple_id = tuple_ids.value_tuple_id
JOIN corpus_structattr_value AS t_value ON t_value.id = t_map.value_id
JOIN corpus_structattr_value_tuple AS t_tuple ON t_tuple.id = tuple_ids.value_tuple_id
{' '.join(aligned_join)}
WHERE t_value.corpus_name = %s AND t_value.structure_name = %s AND t_value.structattr_name = %s
GROUP BY t_value.value
ORDER BY db_id
'''
params = tuple(
param
for subl in node.metadata_condition
for mc in subl
for param in (self.category_tree.corpus_id, mc.struct, mc.attr, mc.value)
)
params += tuple(self.category_tree.aligned_corpora)
params += (self.category_tree.corpus_id, self._id_struct, self._id_attr)
with self._db.cursor() as cursor:
cursor.execute(sql, |
minlex/django-socialregistration | socialregistration/contrib/openid/migrations/0001_initial.py | Python | mit | 2,204 | 0.002269 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import socialregistration.models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='OpenIDNonce',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('server_url', models.CharField(max_length=255)),
('timestamp', models.IntegerField()),
('salt', models.CharField(max_length=255)),
('date_created', models.DateTimeField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OpenIDProfile',
| fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('identity', models.TextField()),
('site', models.ForeignKey(default=socialregistration.models.get_default_site, to='sites.Site')),
('user', mod | els.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OpenIDStore',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('server_url', models.CharField(max_length=255)),
('handle', models.CharField(max_length=255)),
('secret', models.TextField()),
('issued', models.IntegerField()),
('lifetime', models.IntegerField()),
('assoc_type', models.TextField()),
('site', models.ForeignKey(default=socialregistration.models.get_default_site, to='sites.Site')),
],
options={
},
bases=(models.Model,),
),
]
|
tensorflow/tfx | tfx/dsl/input_resolution/strategies/latest_blessed_model_strategy_test.py | Python | apache-2.0 | 2,902 | 0.001378 | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for LatestBlessedModelStrategy."""
import tensorflow as tf
from tfx import types
from tfx.components.model_validator import constants as model_validator
from tfx.dsl.input_resolution.strategies import latest_blessed_model_strategy
from tfx.orchestration import metadata
from tfx.types import standard_artifacts
from tfx.utils import test_case_utils
from ml_metadata.proto import metadata_store_pb2
class LatestBlessedModelStrategyTest(test_case_utils.TfxTest):
def setUp(self):
super().setUp()
self._connection_config = metadata_store_pb2.ConnectionConfig()
self._connection_config.sqlite.SetInParent()
self._metadata = self.enter_context(
metadata.Metadata(connection_config=self._connection_config))
self._store = self._metadata.store
def _set_model_blessing_bit(self, artifact: types.Artifact, model_id: int,
is_blessed: int):
artifact.mlmd_artifact.custom_properties[
model_validator.ARTIFACT | _PROPERTY_BLESSED_KEY].int_value = is_blessed
artifact.mlmd_artifact.custom_properties[
model_validator
.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY].int_value = model_id
def testStrategy(self):
# | Model with id 1, will be blessed.
model_one = standard_artifacts.Model()
model_one.uri = 'model_one'
model_one.id = 1
# Model with id 2, will be blessed.
model_two = standard_artifacts.Model()
model_two.uri = 'model_two'
model_two.id = 2
# Model with id 3, will not be blessed.
model_three = standard_artifacts.Model()
model_three.uri = 'model_three'
model_three.id = 3
model_blessing_one = standard_artifacts.ModelBlessing()
self._set_model_blessing_bit(model_blessing_one, model_one.id, 1)
model_blessing_two = standard_artifacts.ModelBlessing()
self._set_model_blessing_bit(model_blessing_two, model_two.id, 1)
strategy = latest_blessed_model_strategy.LatestBlessedModelStrategy()
result = strategy.resolve_artifacts(
self._store, {
'model': [model_one, model_two, model_three],
'model_blessing': [model_blessing_one, model_blessing_two]
})
self.assertIsNotNone(result)
self.assertEqual([a.uri for a in result['model']], ['model_two'])
if __name__ == '__main__':
tf.test.main()
|
rralcala/random-scripts | py/lc/6/1496.py | Python | gpl-2.0 | 784 | 0 | import unittest
class Solution:
def isPathCrossing(self, path: str) -> bool:
fullpath = {"0-0": 1}
x = 0
y = 0
for move in path:
if move == "N":
x += 1
elif m | ove == "S":
x -= 1
elif move == "E":
y -= 1
else:
y += 1
pos = f"{x}-{y}"
if pos in fullpath:
return True
else:
fullpath[pos] = 1
return False
class TestCase(unittest.TestCase):
def test(self):
self.assertEquals(Solution().isPathCrossing("NES"), False)
self.assertEqu | als(Solution().isPathCrossing("NESWW"), True)
self.assertEquals(Solution().isPathCrossing("N"), False)
|
openstack/oslo.context | doc/source/user/examples/usage_user_identity.py | Python | apache-2.0 | 1,507 | 0 | #!/usr/bin/env python3
#
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific | language governing permissions and limitations
# under the License.
"""A usage example of Oslo Context with user_identity
This example requires the following modules to be installed.
$ pip install oslo.context oslo.log
More information can be found at:
https://docs.ope | nstack.org/oslo.context/latest/user/index.html
"""
from oslo_config import cfg
from oslo_context import context
from oslo_log import log as logging
CONF = cfg.CONF
DOMAIN = "demo"
logging.register_options(CONF)
CONF.logging_user_identity_format = "%(user)s/%(tenant)s@%(project_domain)s"
logging.setup(CONF, DOMAIN)
LOG = logging.getLogger(__name__)
LOG.info("Message without context")
# ids in Openstack are 32 characters long
# For readability a shorter id value is used
context.RequestContext(request_id='req-abc',
user='6ce90b4d',
tenant='d6134462',
project_domain='a6b9360e')
LOG.info("Message with context")
|
abenassi/amtrak-trip | amtrak.py | Python | mit | 5,392 | 0.000185 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
amtrak
Parse a trip itinerary of amtrak services copied into a text file.
Running the file will take a trip.txt file and output a .json with one record
for each amtrak service in the trip. You can also use the main method of the
module. Both cases, the first parameter would be the input and the second one
would be the output.
Example:
$ python amtrak.py
$ python amtrak.py trip.txt
$ python amtrak.py trip.txt /json/amtrak-trip.json
import amtrak
amtrak.main()
amtrak.main("trip.txt")
amtrak.main("trip.txt", "/json/amtrak-trip.json")
See the "trip.txt" file in this directory for an example of the type of amtrak
itinerary e-mail that | is supported for the parsers in this repo.
"""
from __future__ import unicode_literals
import copy
import json
import arrow
import sys
from modules impor | t parsers
class AmtrakServiceParser(object):
"""Parse Amtrak service information from confirmation email lines.
Attributes:
name (str): Name of the service.
departure_station (str): Station where the service starts.
departure_state (str): State where the service starts.
departure_city (str): City where the service starts.
departure_date (str): Date and time when the service starts.
arrival_station (str): Station where the service ends.
arrival_state (str): State where the service ends.
arrival_city (str): City where the service ends.
arrival_date (str): Date and time when the service ends.
accommodation (str): Type of accommodation.
"""
def __init__(self):
self.name = None
self.departure_station = None
self.departure_state = None
self.departure_city = None
self.departure_date = None
self.arrival_station = None
self.arrival_state = None
self.arrival_city = None
self.arrival_date = None
self.accommodation = None
def parse(self, line):
"""Parse one line of the amtrak itinerary.
It will add information to the parser until last item has been parsed,
then it will return a new record and clean the parser from any data.
Args:
line (str): A line of an amtrak itinerary.
Returns:
dict: All the information parsed of a single service.
Example:
{"name": "49 Lake Shore Ltd.",
"departure_station": "New York (Penn Station), New York",
"departure_state": "New York",
"departure_city": "New York",
"departure_date": '2015-05-18T15:40:00+00:00',
"arrival_station": "Chicago (Chicago Union Station), Illinois",
"arrival_state": "Illinois",
"arrival_city": "Chicago",
"arrival_date": '2015-05-19T09:45:00+00:00',
"accommodation": "1 Reserved Coach Seat"}
"""
for parser in parsers.get_parsers():
if parser.accepts(line):
key, value = parser.parse(line)
if not key == "date":
self.__dict__[key] = value
# date could be departure or arrival, departure is always first
else:
if not self.departure_date:
self.departure_date = value.isoformat()
else:
self.arrival_date = value.isoformat()
if self._service_info_complete():
RV = copy.copy(self.__dict__)
self.__init__()
else:
RV = None
return RV
def _service_info_complete(self):
for value in self.__dict__.values():
if not value:
return False
return True
def parse_services(filename='trip.txt'):
"""Parse all services from an amtrak itinerary.
Args:
filename (str): Path to a text file with an amtrak itinerary.
Yields:
dict: New record with data about a service.
"""
parser = AmtrakServiceParser()
with open(filename, 'rb') as f:
for line in f.readlines():
new_record = parser.parse(line)
if new_record:
yield new_record
def add_calc_fields(service):
"""Write the duration of a service into an new field."""
service["duration"] = _calc_duration(service)
return service
def _calc_duration(service):
"""Calculates the duration of a service."""
duration = arrow.get(service["arrival_date"]) - \
arrow.get(service["departure_date"])
return round(duration.total_seconds() / 60 / 60, 1)
def write_services_to_json(services, file_name="./json/amtrak-trip.json"):
"""Write parsed services to a json file.
Args:
services (dict): Parsed services.
file_name (str): Path of the json file to write in.
"""
with open(file_name, "w") as f:
f.write(json.dumps(services, indent=4))
def main(filename='trip.txt', file_name="./json/amtrak-trip.json"):
services = [add_calc_fields(service) for service
in parse_services(filename)]
write_services_to_json(services, file_name)
if __name__ == '__main__':
if len(sys.argv) == 2:
main(sys.argv[1])
elif len(sys.argv) == 3:
main(sys.argv[1], sys.argv[2])
else:
main()
|
inovtec-solutions/OpenERP | openerp/addons/mail/tests/test_mail_base.py | Python | agpl-3.0 | 4,516 | 0.00155 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestMailBase(common.TransactionCase):
def _mock_smtp_gateway(self, *args, **kwargs):
return args[2]['Message-Id']
def _init_mock_build_email(self):
self._build_email_args_list = []
self._build_email_kwargs_list = []
def _mock_build_email(self, *args, **kwargs):
""" Mock build_email to be able to test its values. Store them into
some internal variable for latter processing. """
self._build_email_args_list.append(args)
self._build_email_kwargs_list.append(kwargs)
return self._build_email(*args, **kwargs)
def setUp(self):
super(TestMailBase, self).setUp()
cr, uid = self.cr, self.uid
# Install mock SMTP gateway
self._init_mock_build_email()
self._build_email = self.registry('ir.mail_server').build_email
self.registry('ir.mail_server').build_email = self._mock_build_email
self._send_email = self.registry('ir.mail_server').send_email
self.registry('ir.mail_server').send_email = self._mock_smtp_gateway
# Usefull models
self.ir_model = self.registry('ir.model')
self.ir_model_data = self.registry('i | r.mod | el.data')
self.ir_attachment = self.registry('ir.attachment')
self.mail_alias = self.registry('mail.alias')
self.mail_thread = self.registry('mail.thread')
self.mail_group = self.registry('mail.group')
self.mail_mail = self.registry('mail.mail')
self.mail_message = self.registry('mail.message')
self.mail_notification = self.registry('mail.notification')
self.mail_followers = self.registry('mail.followers')
self.mail_message_subtype = self.registry('mail.message.subtype')
self.res_users = self.registry('res.users')
self.res_partner = self.registry('res.partner')
# Find Employee group
group_employee_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_user')
self.group_employee_id = group_employee_ref and group_employee_ref[1] or False
# Test users to use through the various tests
self.user_raoul_id = self.res_users.create(cr, uid,
{'name': 'Raoul Grosbedon', 'signature': 'SignRaoul', 'email': 'raoul@raoul.fr', 'login': 'raoul', 'groups_id': [(6, 0, [self.group_employee_id])]})
self.user_bert_id = self.res_users.create(cr, uid,
{'name': 'Bert Tartignole', 'signature': 'SignBert', 'email': 'bert@bert.fr', 'login': 'bert', 'groups_id': [(6, 0, [])]})
self.user_raoul = self.res_users.browse(cr, uid, self.user_raoul_id)
self.user_bert = self.res_users.browse(cr, uid, self.user_bert_id)
self.user_admin = self.res_users.browse(cr, uid, uid)
self.partner_admin_id = self.user_admin.partner_id.id
self.partner_raoul_id = self.user_raoul.partner_id.id
self.partner_bert_id = self.user_bert.partner_id.id
# Test 'pigs' group to use through the various tests
self.group_pigs_id = self.mail_group.create(cr, uid,
{'name': 'Pigs', 'description': 'Fans of Pigs, unite !'},
{'mail_create_nolog': True})
self.group_pigs = self.mail_group.browse(cr, uid, self.group_pigs_id)
def tearDown(self):
# Remove mocks
self.registry('ir.mail_server').build_email = self._build_email
self.registry('ir.mail_server').send_email = self._send_email
super(TestMailBase, self).tearDown()
|
l0k1-smirenski/steemportal | src/web.py | Python | unlicense | 1,995 | 0.009023 | #!/usr/bin/python3
#
# web.py
#
# Interface frontend for a webserver interface module
#
# Here's your stinkin' licence: http://unlicense.org/
#
# - l0k1
#
# Contact details for l0k1
# steemit.com - https://steemit.com/@l0k1
# bitmessage - BM-2cXWxTVaXJbNyMxv5tAjNg87xS98hrAg8P
# torchat - xq6xcvqc2vy34qtx
# email - l0k1@null.net
def printmodulename ():
print ("Importing web frontend module")
class SPinterface ():
"""
Each distinct interface module will replicate the same class and core
module's interfacing calls. Each module can contain other classes and
functions, including imports, that are relevant to the module. This top
level class abstracts the calls so the core can remain agnostic about
which frontend is being used.
"""
def __init__(self, config_obj, core_obj):
"""
Interface initialisation
This function opens the window, | calls back through the core, to
get the configuration, to load the initial interface.
The configuration tells the interface what to set up in the initial
startup.
"""
print ("starting interface")
self.config = config_obj
self.core = core_obj
def open ():
| """
This opens up the main window, places all the widgets, and from the
configuration, loads the content that belongs in them.
"""
pass
def persist ():
"""
This collects the current interface status for shutdown of the app
and
"""
pass
def config ():
"""
This opens up the user configuration dialog for user configuration
of the interface and specifying the users' desired modes of
operation
"""
pass
def open_url (urlstring):
"""
This takes the parameter of a URL and queries the core to gather
the data required to display the new URL
"""
pass
|
vqw/frappe | frappe/integration_broker/doctype/integration_service/test_integration_service.py | Python | mit | 293 | 0.006826 | # -*- coding: utf-8 -*-
# Copyri | ght (c) 2015, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records | = frappe.get_test_records('Integration Service')
class TestIntegrationService(unittest.TestCase):
pass
|
labero/kupfer | waflib/Options.py | Python | gpl-3.0 | 7,752 | 0.025284 | #!/usr/bin/env python
# encoding: utf-8
# Scott Newton, 2005 (scottn)
# Thomas Nagy, 2006-2010 (ita)
"""
Support for waf command-line options
Provides default command-line options,
as well as custom ones, used by the ``options`` wscript function.
"""
import os, tempfile, optparse, sys, re
from waflib import Logs, Utils, Context
cmds = 'distclean configure build install clean uninstall check dist distcheck'.split()
"""
Constant representing the default waf commands displayed in::
$ waf --help
"""
options = {}
"""
A dictionary representing the command-line options::
$ waf --foo=bar
"""
commands = []
"""
List of commands to execute extracted from the command-line. This list is consumed during the execution, see :py:func:`waflib.Scripting.run_commands`.
"""
lockfile = os.environ.get('WAFLOCK', '.lock-waf_%s_build' % sys.platform)
try: cache_global = os.path.abspath(os.environ['WAFCACHE'])
except KeyError: cache_global = ''
platform = Utils.unversioned_sys_platform()
class opt_parser(optparse.OptionParser):
"""
Command-line options parser.
"""
def __init__(self, ctx):
optparse.OptionParser.__init__(self, conflict_handler="resolve", version='waf %s (%s)' % (Context.WAFVERSION, Context.WAFREVISION))
self.formatter.width = Logs.get_term_cols()
p = self.add_option
self.ctx = ctx
jobs = ctx.jobs()
p('-j', '--jobs', dest='jobs', default=jobs, type='int', help='amount of parallel jobs (%r)' % jobs)
p('-k', '--keep', dest='keep', default=0, action='count', help='keep running happily even if errors are found')
p('-v', '--verbose', dest='verbose', default=0, action='count', help='verbosity level -v -vv or -vvv [default: 0]')
p('--nocache', dest='nocache', default=False, action='store_true', help='ignore the WAFCACHE (if set)')
p('--zones', dest='zones', default='', action='store', help='debugging zones (task_gen, deps, tasks, etc)')
gr = optparse.OptionGroup(self, 'configure options')
self.add_option_group(gr)
gr.add_option('-o', '--out', action='store', default='', help='build dir for the project', dest='out')
gr.add_option('-t', '--top', action='store', default='', help='src dir for the project', dest='top')
default_prefix = os.environ.get('PREFIX')
if not default_prefix:
if platform == 'win32':
d = tempfile.gettempdir()
default_prefix = d[0].upper() + d[1:]
# win32 preserves the case, but gettempdir does not
else:
default_prefix = '/usr/local/'
gr.add_option('--prefix', dest='prefix', defaul | t=default_prefix, help='installation prefix [default: %r]' % default_prefix)
gr.add_option('--download', dest='download', default=False, action='store_true', help='try to download the tools if missing')
gr = optparse.OptionGroup(self, 'build and install options')
self.add_option_group(gr)
gr.add_option('-p', '--progress', dest='progress_bar', default=0, action='count', help= '-p: progre | ss bar; -pp: ide output')
gr.add_option('--targets', dest='targets', default='', action='store', help='task generators, e.g. "target1,target2"')
gr = optparse.OptionGroup(self, 'step options')
self.add_option_group(gr)
gr.add_option('--files', dest='files', default='', action='store', help='files to process, by regexp, e.g. "*/main.c,*/test/main.o"')
default_destdir = os.environ.get('DESTDIR', '')
gr = optparse.OptionGroup(self, 'install/uninstall options')
self.add_option_group(gr)
gr.add_option('--destdir', help='installation root [default: %r]' % default_destdir, default=default_destdir, dest='destdir')
gr.add_option('-f', '--force', dest='force', default=False, action='store_true', help='force file installation')
def get_usage(self):
"""
Return the message to print on ``waf --help``
"""
cmds_str = {}
for cls in Context.classes:
if not cls.cmd or cls.cmd == 'options':
continue
s = cls.__doc__ or ''
cmds_str[cls.cmd] = s
if Context.g_module:
for (k, v) in Context.g_module.__dict__.items():
if k in ['options', 'init', 'shutdown']:
continue
if type(v) is type(Context.create_context):
if v.__doc__ and not k.startswith('_'):
cmds_str[k] = v.__doc__
just = 0
for k in cmds_str:
just = max(just, len(k))
lst = [' %s: %s' % (k.ljust(just), v) for (k, v) in cmds_str.items()]
lst.sort()
ret = '\n'.join(lst)
return '''waf [commands] [options]
Main commands (example: ./waf build -j4)
%s
''' % ret
class OptionsContext(Context.Context):
"""
Collect custom options from wscript files and parses the command line.
Set the global :py:const:`waflib.Options.commands` and :py:const:`waflib.Options.options` values.
"""
cmd = 'options'
fun = 'options'
def __init__(self, **kw):
super(OptionsContext, self).__init__(**kw)
self.parser = opt_parser(self)
"""Instance of :py:class:`waflib.Options.opt_parser`"""
self.option_groups = {}
def jobs(self):
"""
Find the amount of cpu cores to set the default amount of tasks executed in parallel. At
runtime the options can be obtained from :py:const:`waflib.Options.options` ::
from waflib.Options import options
njobs = options.jobs
:return: the amount of cpu cores
:rtype: int
"""
count = int(os.environ.get('JOBS', 0))
if count < 1:
if 'NUMBER_OF_PROCESSORS' in os.environ:
# on Windows, use the NUMBER_OF_PROCESSORS environment variable
count = int(os.environ.get('NUMBER_OF_PROCESSORS', 1))
else:
# on everything else, first try the POSIX sysconf values
if hasattr(os, 'sysconf_names'):
if 'SC_NPROCESSORS_ONLN' in os.sysconf_names:
count = int(os.sysconf('SC_NPROCESSORS_ONLN'))
elif 'SC_NPROCESSORS_CONF' in os.sysconf_names:
count = int(os.sysconf('SC_NPROCESSORS_CONF'))
if not count and os.name not in ('nt', 'java'):
try:
tmp = self.cmd_and_log(['sysctl', '-n', 'hw.ncpu'], quiet=0)
except Exception:
pass
else:
if re.match('^[0-9]+$', tmp):
count = int(tmp)
if count < 1:
count = 1
elif count > 1024:
count = 1024
return count
def add_option(self, *k, **kw):
"""
Wrapper for optparse.add_option::
def options(ctx):
ctx.add_option('-u', '--use', dest='use', default=False, action='store_true',
help='a boolean option')
"""
self.parser.add_option(*k, **kw)
def add_option_group(self, *k, **kw):
"""
Wrapper for optparse.add_option_group::
def options(ctx):
ctx.add_option_group('some options')
gr.add_option('-u', '--use', dest='use', default=False, action='store_true')
"""
try:
gr = self.option_groups[k[0]]
except:
gr = self.parser.add_option_group(*k, **kw)
self.option_groups[k[0]] = gr
return gr
def get_option_group(self, opt_str):
"""
Wrapper for optparse.get_option_group::
def options(ctx):
gr = ctx.get_option_group('configure options')
gr.add_option('-o', '--out', action='store', default='',
help='build dir for the project', dest='out')
"""
try:
return self.option_groups[opt_str]
except KeyError:
for group in self.parser.option_groups:
if group.title == opt_str:
return group
return None
def parse_args(self, _args=None):
"""
Parse arguments from a list (not bound to the command-line).
:param _args: arguments
:type _args: list of strings
"""
global options, commands
(options, leftover_args) = self.parser.parse_args(args=_args)
commands = leftover_args
if options.destdir:
options.destdir = os.path.abspath(os.path.expanduser(options.destdir))
if options.verbose >= 1:
self.load('errcheck')
def execute(self):
    """
    Run the context as the base class does, then parse the command-line
    arguments so the results become globally available.

    See :py:func:`waflib.Context.Context.execute`
    """
    super(OptionsContext, self).execute()
    # Parse only after execute() ran -- presumably so that tool/user scripts
    # have had a chance to register their options first (TODO confirm against
    # Context.execute).
    self.parse_args()
|
TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/persistence/autosaver.py | Python | gpl-3.0 | 2,170 | 0.001843 | '''
Task Coach - Your friendly task manager
Copyright (C) 2004-2013 Task Coach developers <developers@taskcoach.org>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from taskcoachlib.thirdparty.pubsub import pub
import wx
class AutoSaver(object):
    """Observe task files and persist them automatically.

    When a task file becomes 'dirty' (changed by the user) and the auto-save
    preference is on, the file is queued and written out during application
    idle time.

    Note: the source line of ``__init__``'s signature and of
    ``onTaskFileDirty`` contained stray ``|`` corruption artifacts; they are
    repaired here with no behavioural change.
    """

    def __init__(self, settings, *args, **kwargs):
        super(AutoSaver, self).__init__(*args, **kwargs)
        self.__settings = settings
        # Dirty task files waiting to be saved at the next idle event.
        self.__task_files = set()
        pub.subscribe(self.onTaskFileDirty, 'taskfile.dirty')
        wx.GetApp().Bind(wx.EVT_IDLE, self.on_idle)

    def onTaskFileDirty(self, taskFile):
        """When a task file gets dirty and auto save is on, note it so
        it can be saved during idle time."""
        if self._needSave(taskFile):
            self.__task_files.add(taskFile)

    def _needSave(self, task_file):
        """Return whether the task file needs to be saved."""
        return task_file.filename() and task_file.needSave() and \
            self.__settings.getboolean('file', 'autosave')

    def _needLoad(self, taskFile):
        """Return whether the task file should be reloaded from disk."""
        return taskFile.changedOnDisk() and \
            self.__settings.getboolean('file', 'autoload')

    def on_idle(self, event):
        """Actually save the dirty files during idle time."""
        event.Skip()
        # Re-check _needSave when popping: the file may have been saved (or
        # auto-save switched off) since it was queued.
        while self.__task_files:
            task_file = self.__task_files.pop()
            if self._needSave(task_file):
                task_file.save()
|
zzcclp/carbondata | python/pycarbon/tests/core/test_carbon_tf_dataset.py | Python | apache-2.0 | 10,899 | 0.010643 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import operator
import os
from copy import copy
import numpy as np
import pytest
import tensorflow as tf
from petastorm.ngram import NGram
from petastorm.predicates import in_lambda
from pycarbon.core.carbon_reader import make_carbon_reader, make_batch_carbon_reader
from pycarbon.core.carbon_tf_utils import make_pycarbon_dataset
from pycarbon.tests.conftest import _ROWS_COUNT
from pycarbon.tests.core.test_carbon_common import TestSchema
import jnius_config
# Configure the JVM classpath for pyjnius before any carbon reader is created.
jnius_config.set_classpath(pytest.config.getoption("--carbon-sdk-path"))

# PySpark must know which python interpreters to use: either both paths are
# supplied on the pytest command line, or both are already present in the
# environment; anything else is a configuration error.
if pytest.config.getoption("--pyspark-python") is not None and \
        pytest.config.getoption("--pyspark-driver-python") is not None:
    os.environ['PYSPARK_PYTHON'] = pytest.config.getoption("--pyspark-python")
    os.environ['PYSPARK_DRIVER_PYTHON'] = pytest.config.getoption("--pyspark-driver-python")
elif 'PYSPARK_PYTHON' in os.environ.keys() and 'PYSPARK_DRIVER_PYTHON' in os.environ.keys():
    pass
else:
    raise ValueError("please set PYSPARK_PYTHON and PYSPARK_DRIVER_PYTHON variables, "
                     "using cmd line "
                     "--pyspark-python=PYSPARK_PYTHON_PATH --pyspark-driver-python=PYSPARK_DRIVER_PYTHON_PATH "
                     "or set PYSPARK_PYTHON and PYSPARK_DRIVER_PYTHON in system env")

# All test schema fields except the decimal one.
_EXCLUDE_FIELDS = set(TestSchema.fields.values()) - {TestSchema.decimal}

# Reader flavours under test: each factory takes a dataset url plus keyword
# overrides that are merged on top of the defaults below.
ALL_READER_FLAVOR_FACTORIES = [
    lambda url, **kwargs: make_carbon_reader(url, **_merge_params({'reader_pool_type': 'thread', 'workers_count': 1,
                                                                   'schema_fields': _EXCLUDE_FIELDS}, kwargs)),
]
def _merge_params(base, overwrite):
"""Merges two dictionaries when values from ``overwrite`` takes precedence over values of ``base`` dictionary.
Both input parameters are not modified.
:param base: A dictionary
:param overwrite: A dictionary. If a value with the same key exists in ``base``, it is overwritten by the value from
this dictionary.
:return: A combined dictionary
"""
# Create a shallow copy of base
combined = copy(base)
combined.update(overwrite)
return combined
@pytest.mark.forked
@pytest.mark.parametrize('reader_factory', ALL_READER_FLAVOR_FACTORIES)
def test_with_one_shot_iterator(carbon_synthetic_dataset, reader_factory):
    """Just a bunch of read and compares of all values to the expected values"""
    with reader_factory(carbon_synthetic_dataset.url) as reader:
        dataset = make_pycarbon_dataset(reader)
        iterator = dataset.make_one_shot_iterator()

        # Make sure we have static shape info for all fields
        for shape in dataset.output_shapes:
            # TODO(yevgeni): check that the shapes are actually correct, not just not None
            assert shape.dims is not None

        # Read a bunch of entries from the dataset and compare the data to reference
        with tf.Session() as sess:
            iterator = iterator.get_next()
            for _, _ in enumerate(carbon_synthetic_dataset.data):
                actual = sess.run(iterator)._asdict()
                # Rows may be served in any order: match by the 'id' column.
                expected = next(d for d in carbon_synthetic_dataset.data if d['id'] == actual['id'])
                for key in actual.keys():
                    if isinstance(expected[key], str):
                        # Tensorflow returns all strings as bytes in python3. So we will need to decode it
                        actual_value = actual[key].decode()
                    elif isinstance(expected[key], np.ndarray) and expected[key].dtype.type == np.unicode_:
                        # Same decoding, element-wise, for unicode arrays.
                        actual_value = np.array([item.decode() for item in actual[key]])
                    else:
                        actual_value = actual[key]
                    np.testing.assert_equal(actual_value, expected[key])

            # Exhausted one full epoch. Fetching next value should trigger OutOfRangeError
            with pytest.raises(tf.errors.OutOfRangeError):
                sess.run(iterator)
@pytest.mark.forked
@pytest.mark.parametrize('reader_factory', ALL_READER_FLAVOR_FACTORIES)
def test_with_dataset_repeat(carbon_synthetic_dataset, reader_factory):
    """``tf.data.Dataset``'s ``repeat`` should not be used on ``make_pycarbon_dataset`` due to high costs of
    ``Reader initialization``. A user should use ``Reader`` built-in epochs support. Check that we raise an
    error to alert of misuse."""
    with reader_factory(carbon_synthetic_dataset.url) as reader:
        dataset = make_pycarbon_dataset(reader)
        dataset = dataset.repeat(2)
        iterator = dataset.make_one_shot_iterator()

        # Read a bunch of entries from the dataset and compare the data to reference
        with tf.Session() as sess:
            iterator = iterator.get_next()
            # The first epoch should be served without complaint...
            for _, _ in enumerate(carbon_synthetic_dataset.data):
                sess.run(iterator)

            # ...but starting a second pass over the same reader must fail.
            with pytest.raises(tf.errors.UnknownError, match=r'.*Multiple iterations.*'):
                sess.run(iterator)
@pytest.mark.forked
@pytest.mark.parametrize('reader_factory', ALL_READER_FLAVOR_FACTORIES)
def test_some_processing_functions(carbon_synthetic_dataset, reader_factory):
    """Try several ``tf.data.Dataset`` dataset operations on make_pycarbon_dataset"""

    # reader1 will have a single row with id=1, reader2: a single row with id=2
    # Using functools.partial(_eq, 1)) which is equivalent to lambda x: x==1 because standard python pickle
    # can not pickle this lambda
    with reader_factory(carbon_synthetic_dataset.url,
                        predicate=in_lambda(['id'], functools.partial(operator.eq, 1))) as reader1:
        with reader_factory(carbon_synthetic_dataset.url,
                            predicate=in_lambda(['id'], functools.partial(operator.eq, 2))) as reader2:
            # Exercise prefetch/concatenate/map/batch in one pipeline.
            dataset = make_pycarbon_dataset(reader1) \
                .prefetch(10) \
                .concatenate(make_pycarbon_dataset(reader2)) \
                .map(lambda x: x.id) \
                .batch(2)

            next_sample = dataset.make_one_shot_iterator().get_next()
            with tf.Session() as sess:
                # 'actual' is expected to be content of id column of a concatenated dataset
                actual = sess.run(next_sample)
                np.testing.assert_array_equal(actual, [1, 2])
@pytest.mark.parametrize('reader_factory', ALL_READER_FLAVOR_FACTORIES)
def test_dataset_on_ngram_not_supported(carbon_synthetic_dataset, reader_factory):
    """NGram schemas are not supported by make_pycarbon_dataset: expect NotImplementedError.

    (The original source lines carried stray ``|`` corruption artifacts inside
    the parameter name and the ``schema_fields`` keyword; repaired here.)
    """
    ngram = NGram({0: list(_EXCLUDE_FIELDS), 1: [TestSchema.id]}, 100, TestSchema.id)
    with reader_factory(carbon_synthetic_dataset.url, schema_fields=ngram) as reader:
        with pytest.raises(NotImplementedError):
            make_pycarbon_dataset(reader)
@pytest.mark.forked
def test_non_unischema_with_many_colums_with_one_shot_iterator(carbon_many_columns_non_unischema_dataset):
    """Just a bunch of read and compares of all values to the expected values"""
    with make_batch_carbon_reader(carbon_many_columns_non_unischema_dataset.url, workers_count=1) as reader:
        dataset = make_pycarbon_dataset(reader)
        iterator = dataset.make_one_shot_iterator()

        # Make sure we have static shape info for all fields
        for shape in dataset.output_shapes:
            # TODO(yevgeni): check that the shapes are actually correct, not just not None
            assert shape.dims is not None

        # Read a bunch of entries from the dataset and compare the data to reference
        with tf.Session() as sess:
            iterator = iterator.get_next()
            sample = sess.run(iterator)._asdict()
            # One batch is enough: we only verify the column set matches.
            assert set(sample.keys()) == set(carbon_many_columns_non_unischema_dataset.data[0].keys())
@pytest.mark.forked
def test_dataset_carbon_reader(carbon_synthetic_dataset):
with make_carbon_reader(carbon_syntheti |
asazo/ANN | tarea3/Pregunta2/model_64.py | Python | mit | 1,302 | 0.002304 | import numpy as np
from theano.tensor.shared_randomstreams import RandomStreams
from matplotlib import pyplot
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.datasets import imdb

# Fix the RNG seeds for reproducibility.
np.random.seed(3)
srng = RandomStreams(8)

(X_train, y_train), (X_test, y_test) = imdb.load_data(seed=15)

# Concatenation of the training and test sets.
X = np.concatenate((X_train, X_test), axis=0)
y = np.concatenate((y_train, y_test), axis=0)

# Reload keeping only the 3000 most relevant (frequent) words.
top_words = 3000
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=3000, seed=15)

# Cap every review at a maximum of 500 words (pad shorter ones).
X_train = sequence.pad_sequences(X_train, maxlen=500)
X_test = sequence.pad_sequences(X_test, maxlen=500)

# Size of the vector produced by the embedding layer.
# NOTE: two corrupted tokens in the original (``to | p_words`` and a stray
# ``|`` inside the model.fit call) have been repaired below.
embedding_vector_length = 64

# Embedding -> LSTM(100) -> sigmoid output for binary sentiment classification.
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=500))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=3, batch_size=64)
model.save('LSTM-64.h5')
paramsingh/codechef-solutions | src/long/sep15/question5.py | Python | mit | 310 | 0 | # Author: Param Singh <paramsingh258@gmail.com>
# Cracking the | code
i, k, s = map(int, raw_input().split())
ai, bi = map(int, raw_input().split())
diff = k - i
v = diff / 2
if diff % 2 == 0:
val = (2 ** (4*v-s)) * ( | ai + bi)
else:
val = (2 ** (4*v - s + 1.5) * (ai + (3 ** 0.5) * bi))
print "%f" % val
|
zzcclp/carbondata | python/setup.py | Python | apache-2.0 | 3,483 | 0.001148 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import re
import setuptools
from setuptools import setup
PACKAGE_NAME = 'pycarbon'

# Long description for PyPI comes straight from the README.
with open('README.md') as f:
    long_description = f.read()

# Extract the version string from __init__.py without importing the package.
with io.open('__init__.py', 'rt', encoding='utf8') as f:
    match = re.search(r'__version__ = \'(.*?)\'', f.read())

# Check the match object itself: the original called .group(1) first, so a
# failed search raised AttributeError and the ImportError below was
# unreachable.
if match is None:
    raise ImportError('Could not find __version__ in __init__.py')
version = match.group(1)
# Runtime requirements.  NOTE(review): petastorm/pyspark/pyarrow are pinned to
# exact versions -- presumably for binary compatibility; confirm before
# loosening.
REQUIRED_PACKAGES = [
    'petastorm==0.7.2',
    'dill>=0.2.1',
    'diskcache>=3.0.0',
    'numpy>=1.13.3',
    'pandas>=0.19.0',
    'psutil>=4.0.0',
    'pyspark==2.3.2',
    'pyzmq>=14.0.0',
    'pyarrow==0.11.1',
    'six>=1.5.0',
    'torchvision>=0.2.1',
    'tensorflow>=1.4.0',
    'jnius>=1.1.0',
    'pyjnius>=1.2.0',
    'huaweicloud-sdk-python-modelarts-dataset>=0.1.1',
    'future==0.17.1',
    'futures>=2.0; python_version == "2.7"',
]

# Optional dependency groups, installable as ``pycarbon[<name>]``.
EXTRA_REQUIRE = {
    # `docs` versions are to facilitate local generation of documentation.
    # Sphinx 1.3 would be desirable, but is incompatible with current ATG setup.
    # Thus the pinning of both sphinx and alabaster versions.
    'docs': [
        'sphinx==1.2.2',
        'alabaster==0.7.11'
    ],
    'opencv': ['opencv-python>=3.2.0.6'],
    'tf': ['tensorflow>=1.4.0'],
    'tf_gpu': ['tensorflow-gpu>=1.4.0'],
    'test': [
        'Pillow>=3.0',
        'codecov>=2.0.15',
        'mock>=2.0.0',
        'opencv-python>=3.2.0.6',
        'flake8',
        'pylint>=1.9',
        'pytest>=3.0.0',
        'pytest-cov>=2.5.1',
        'pytest-forked>=0.2',
        'pytest-logger>=0.4.0',
        'pytest-timeout>=1.3.3',
        'pytest-xdist',
        's3fs>=0.0.1',
    ],
    'torch': ['torchvision>=0.2.1'],
}
# Ship every package discovered under the current directory.
packages = setuptools.find_packages()

setup(
    name=PACKAGE_NAME,
    version=version,
    install_requires=REQUIRED_PACKAGES,
    packages=packages,
    description='Pycarbon is a library that optimizes data access for AI based on CarbonData files, '
                ' and it is based on Petastorm.',
    long_description=long_description,
    long_description_content_type="text/markdown",
    license='Apache License, Version 2.0',
    extras_require=EXTRA_REQUIRE,
    # No console scripts or plugins are exposed.
    entry_points={
    },
    url='https://github.com/apache/carbondata',
    author='Apache CarbonData',
    classifiers=[
        'Environment :: Console',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
|
gacarrillor/QGIS | tests/src/python/test_authmanager_pki_postgres.py | Python | gpl-2.0 | 5,746 | 0.001568 | # -*- coding: utf-8 -*-
"""
Tests for auth manager PKI access to postgres.
This is an integration test for QGIS Desktop Auth Manager postgres provider that
checks if QGIS can use a stored auth manager auth configuration to access
a PKI protected postgres.
From build dir, run: ctest -R PyQgsAuthManagerPKIPostgresTest -V
It uses a docker container as postgres/postgis server with certificates from tests/testdata/auth_system/certs_keys_2048
Use docker-compose -f .docker/docker-compose-testing-postgres.yml up postgres to start the server.
TODO:
- Document how to restore the server data
- Document how to use docker inspect to find the IP of the docker postgres server and set a host alias (or some other smart idea to do the same)
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import time
import signal
import stat
import subprocess
import tempfile
import glob
from shutil import rmtree
from utilities import unitTestDataPath
from qgis.core import (
QgsApplication,
QgsAuthManager,
QgsAuthMethodConfig,
QgsVectorLayer,
QgsDataSourceUri,
QgsWkbTypes,
)
from qgis.PyQt.QtNetwork import QSslCertificate
from qgis.PyQt.QtCore import QFile
from qgis.testing import (
start_app,
unittest,
)
__author__ = 'Alessandro Pasotti'
__date__ = '25/10/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
qgis_app = start_app()
class TestAuthManager(unittest.TestCase):
    # Integration test: requires the dockerised PKI-protected postgres server
    # described in the module docstring; it is not self-contained.

    @classmethod
    def setUpAuth(cls):
        """Run before all tests and set up authentication"""
        authm = QgsApplication.authManager()
        assert (authm.setMasterPassword('masterpassword', True))
        # Client side
        cls.sslrootcert_path = os.path.join(cls.certsdata_path, 'qgis_ca.crt')
        cls.sslcert = os.path.join(cls.certsdata_path, 'docker.crt')
        cls.sslkey = os.path.join(cls.certsdata_path, 'docker.key')
        assert os.path.isfile(cls.sslcert)
        assert os.path.isfile(cls.sslkey)
        assert os.path.isfile(cls.sslrootcert_path)
        # Keys/certs must be owner-read-only or the PKI layer may reject them.
        os.chmod(cls.sslcert, stat.S_IRUSR)
        os.chmod(cls.sslkey, stat.S_IRUSR)
        os.chmod(cls.sslrootcert_path, stat.S_IRUSR)
        cls.auth_config = QgsAuthMethodConfig("PKI-Paths")
        cls.auth_config.setConfig('certpath', cls.sslcert)
        cls.auth_config.setConfig('keypath', cls.sslkey)
        cls.auth_config.setName('test_pki_auth_config')
        # Connection parameters matching the docker-compose test server.
        cls.pg_user = 'docker'
        cls.pg_pass = 'docker'
        cls.pg_host = 'postgres'
        cls.pg_port = '5432'
        cls.pg_dbname = 'qgis_test'
        cls.sslrootcert = QSslCertificate.fromPath(cls.sslrootcert_path)
        assert cls.sslrootcert is not None
        # Register the CA and rebuild the trust caches so verification works.
        authm.storeCertAuthorities(cls.sslrootcert)
        authm.rebuildCaCertsCache()
        authm.rebuildTrustedCaCertsCache()
        authm.rebuildCertTrustCache()
        assert (authm.storeAuthenticationConfig(cls.auth_config)[0])
        assert cls.auth_config.isValid()

    @classmethod
    def setUpClass(cls):
        """Run before all tests:
        Creates an auth configuration"""
        cls.certsdata_path = os.path.join(unitTestDataPath('auth_system'), 'certs_keys_2048')
        cls.setUpAuth()

    @classmethod
    def tearDownClass(cls):
        """Run after all tests"""
        super().tearDownClass()

    def setUp(self):
        """Run before each test."""
        super().setUp()

    def tearDown(self):
        """Run after each test."""
        super().tearDown()

    @classmethod
    def _getPostGISLayer(cls, type_name, layer_name=None, authcfg=None):
        """
        PG layer factory: build a postgres vector layer for the test table,
        optionally bound to an authentication configuration id.
        """
        if layer_name is None:
            layer_name = 'pg_' + type_name
        uri = QgsDataSourceUri()
        uri.setWkbType(QgsWkbTypes.Point)
        uri.setConnection(cls.pg_host, cls.pg_port, cls.pg_dbname, cls.pg_user, cls.pg_pass, QgsDataSourceUri.SslVerifyFull, authcfg)
        uri.setKeyColumn('pk')
        uri.setSrid('EPSG:4326')
        uri.setDataSource('qgis_test', 'someData', "geom", "", "pk")
        # Note: do not expand here!
        layer = QgsVectorLayer(uri.uri(False), layer_name, 'postgres')
        return layer

    def testValidAuthAccess(self):
        """
        Access the protected layer with valid credentials
        """
        pg_layer = self._getPostGISLayer('testlayer_èé', authcfg=self.auth_config.id())
        self.assertTrue(pg_layer.isValid())

    def testInvalidAuthAccess(self):
        """
        Access the protected layer with not valid credentials
        """
        pg_layer = self._getPostGISLayer('testlayer_èé')
        self.assertFalse(pg_layer.isValid())

    def testRemoveTemporaryCerts(self):
        """
        Check that no temporary cert remain after connection with
        postgres provider
        """
        def cleanTempPki():
            # Remove leftover temporary PKI files from previous runs.
            pkies = glob.glob(os.path.join(tempfile.gettempdir(), 'tmp*_{*}.pem'))
            for fn in pkies:
                f = QFile(fn)
                f.setPermissions(QFile.WriteOwner)
                f.remove()

        # remove any temppki in temporary path to check that no
        # other pki remain after connection
        cleanTempPki()
        # connect using postgres provider
        pg_layer = self._getPostGISLayer('testlayer_èé', authcfg=self.auth_config.id())
        self.assertTrue(pg_layer.isValid())
        # do test no certs remained
        pkies = glob.glob(os.path.join(tempfile.gettempdir(), 'tmp*_{*}.pem'))
        self.assertEqual(len(pkies), 0)


if __name__ == '__main__':
    unittest.main()
|
dwesa/muhw | app.py | Python | artistic-2.0 | 729 | 0.006859 | __author__ = 'dalewesa'
import flask
from flask import Flask
from flask import render_template
import json
from werkzeug.serving import run_simple
app = Flask(__name__)
@app.route("/")
def home():
    # Serve the landing page template.
    return render_template('index.html')
@app.route("/new")
def sendembed():
    """Return the SoundCloud player embed markup as JSON.

    (The original assignment line contained stray ``|`` corruption artifacts
    inside the HTML string; repaired here.)
    """
    embed_html = {'embed': '<iframe width="100%" height="80%" scrolling="no" frameborder="no" src="https://w.soundcloud.com/player/?url=https%3A//api.soundcloud.com/users/12006695&color=ff5500&auto_play=false&hide_related=false&show_comments=true&show_user=true&show_reposts=false"></iframe>'}
    return flask.jsonify(embed_html)
if __name__ == "__main__":
    # run_simple("homestylebeatz.com", 80, app)  # production-style serving (disabled)
    app.run(debug=True)
|
rushiagr/keystone | keystone/contrib/ec2/controllers.py | Python | apache-2.0 | 17,344 | 0 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the EC2 Credentials service.
This service allows the creation of access/secret credentials used for
the ec2 interop layer of OpenStack.
A user can create as many access/secret pairs, each of which map to a
specific project. This is required because OpenStack supports a user
belonging to multiple projects, whereas the signatures created on ec2-style
requests don't allow specification of which project the user wishes to act
upon.
To complete the cycle, we provide a method that OpenStack services can
use to validate a signature and get a corresponding OpenStack token. This
token allows method calls to other services within the context the
access/secret was created. As an example, Nova requests Keystone to validate
the signature of a request, receives a token, and then makes a request to
Glance to list images needed to perform the requested task.
"""
import abc
import sys
import uuid
from keystoneclient.contrib.ec2 import utils as ec2_utils
from oslo_serialization import jsonutils
import six
from keystone.common import controller
from keystone.common import dependency
from keystone.common import utils
from keystone.common import wsgi
from keystone import exception
from keystone.i18n import _
from keystone.models import token_model
@dependency.requires('assignment_api', 'catalog_api', 'credential_api',
'identity_api', 'resource_api', 'role_api',
'token_provider_api')
@six.add_metaclass(abc.ABCMeta)
class Ec2ControllerCommon(object):
def check_signature(self, creds_ref, credentials):
    """Validate the EC2 signature of a request.

    :param creds_ref: stored credential dict holding the 'secret' key
    :param credentials: request dict carrying 'signature', 'host', etc.
    :raises keystone.exception.Unauthorized: if the signature is missing
        or does not match
    """
    signer = ec2_utils.Ec2Signer(creds_ref['secret'])
    signature = signer.generate(credentials)
    if utils.auth_str_equal(credentials['signature'], signature):
        return
    # NOTE(vish): Some libraries don't use the port when signing
    # requests, so try again without port.
    elif ':' in credentials['signature']:
        hostname, _port = credentials['host'].split(':')
        credentials['host'] = hostname
        signature = signer.generate(credentials)
        # BUG FIX: ``credentials`` is a dict; the original used attribute
        # access (credentials.signature) here, which raised AttributeError
        # instead of performing the comparison.
        if not utils.auth_str_equal(credentials['signature'], signature):
            raise exception.Unauthorized(message='Invalid EC2 signature.')
    else:
        raise exception.Unauthorized(message='EC2 signature not supplied.')
@abc.abstractmethod
def authenticate(self, context, credentials=None, ec2Credentials=None):
    """Validate a signed EC2 request and provide a token.

    Other services (such as Nova) use this **admin** call to determine
    if a request they signed received is from a valid user.

    If it is a valid signature, an OpenStack token that maps
    to the user/tenant is returned to the caller, along with
    all the other details returned from a normal token validation
    call.

    The returned token is useful for making calls to other
    OpenStack services within the context of the request.

    :param context: standard context
    :param credentials: dict of ec2 signature
    :param ec2Credentials: DEPRECATED dict of ec2 signature
    :returns: token: OpenStack token equivalent to access key along
        with the corresponding service catalog and roles
    """
    # Abstract hook: concrete V2/V3 controllers implement this, typically
    # on top of the shared _authenticate() helper below.
    raise exception.NotImplemented()
def _authenticate(self, credentials=None, ec2credentials=None):
    """Common code shared between the V2 and V3 authenticate methods.

    Verifies the EC2 signature, resolves the user/project mapping stored in
    the credential, checks that user/domain/project are all enabled, and
    assembles the role and catalog information.

    (The original body carried two stray ``|`` corruption artifacts, in the
    backwards-compat assignment and a TODO comment; repaired here.)

    :returns: user_ref, tenant_ref, metadata_ref, roles_ref, catalog_ref
    """
    # FIXME(ja): validate that a service token was used!

    # NOTE(termie): backwards compat hack
    if not credentials and ec2credentials:
        credentials = ec2credentials

    if 'access' not in credentials:
        raise exception.Unauthorized(message='EC2 signature not supplied.')

    creds_ref = self._get_credentials(credentials['access'])
    self.check_signature(creds_ref, credentials)

    # TODO(termie): don't create new tokens every time
    # TODO(termie): this is copied from TokenController.authenticate
    tenant_ref = self.resource_api.get_project(creds_ref['tenant_id'])
    user_ref = self.identity_api.get_user(creds_ref['user_id'])
    metadata_ref = {}
    metadata_ref['roles'] = (
        self.assignment_api.get_roles_for_user_and_project(
            user_ref['id'], tenant_ref['id']))

    trust_id = creds_ref.get('trust_id')
    if trust_id:
        metadata_ref['trust_id'] = trust_id
        metadata_ref['trustee_user_id'] = user_ref['id']

    # Validate that the auth info is valid and nothing is disabled
    try:
        self.identity_api.assert_user_enabled(
            user_id=user_ref['id'], user=user_ref)
        self.resource_api.assert_domain_enabled(
            domain_id=user_ref['domain_id'])
        self.resource_api.assert_project_enabled(
            project_id=tenant_ref['id'], project=tenant_ref)
    except AssertionError as e:
        six.reraise(exception.Unauthorized, exception.Unauthorized(e),
                    sys.exc_info()[2])

    roles = metadata_ref.get('roles', [])
    if not roles:
        raise exception.Unauthorized(message='User not valid for tenant.')
    roles_ref = [self.role_api.get_role(role_id) for role_id in roles]

    catalog_ref = self.catalog_api.get_catalog(
        user_ref['id'], tenant_ref['id'], metadata_ref)

    return user_ref, tenant_ref, metadata_ref, roles_ref, catalog_ref
def create_credential(self, context, user_id, tenant_id):
    """Create a secret/access pair for use with ec2 style auth.

    Generates a new set of credentials that map the user/tenant pair.

    :param context: standard context
    :param user_id: id of user
    :param tenant_id: id of tenant
    :returns: credential: dict of ec2 credential
    """
    # Both lookups raise if the referenced entity does not exist.
    self.identity_api.get_user(user_id)
    self.resource_api.get_project(tenant_id)
    trust_id = self._get_trust_id_for_request(context)
    blob = {
        'access': uuid.uuid4().hex,
        'secret': uuid.uuid4().hex,
        'trust_id': trust_id,
    }
    # The credential id is derived deterministically from the access key so
    # that later lookups by access key can find it.
    credential_id = utils.hash_access_key(blob['access'])
    cred_ref = {
        'user_id': user_id,
        'project_id': tenant_id,
        'blob': jsonutils.dumps(blob),
        'id': credential_id,
        'type': 'ec2',
    }
    self.credential_api.create_credential(credential_id, cred_ref)
    return {'credential': self._convert_v3_to_ec2_credential(cred_ref)}
def get_credentials(self, user_id):
    """List all credentials for a user.

    :param user_id: id of user
    :returns: credentials: list of ec2 credential dicts
    """
    # Raises if the user does not exist.
    self.identity_api.get_user(user_id)
    refs = self.credential_api.list_credentials_for_user(user_id)
    converted = [self._convert_v3_to_ec2_credential(ref) for ref in refs]
    return {'credentials': converted}
def get_credential(self, user_id, credential_id):
"""Retrieve a user's access/secret pair by the access key.
Grab the full access/secret pair for a given access key.
:param user_id: id of user
:param credential_id: access key for credentials
:returns: credential: dict of ec2 credential
"""
self.identity_api.get_user( |
NoctuaNivalis/qutebrowser | tests/unit/utils/test_qtutils.py | Python | gpl-3.0 | 32,541 | 0.000061 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.utils.qtutils."""
import io
import os
import os.path
import unittest
import unittest.mock
try:
# pylint: disable=no-name-in-module,useless-suppression
from test import test_file
# pylint: enable=no-name-in-module,useless-suppression
except ImportError:
# Debian patches Python to remove the tests...
test_file = None
import pytest
from PyQt5.QtCore import (QDataStream, QPoint, QUrl, QByteArray, QIODevice,
QTimer, QBuffer, QFile, QProcess, QFileDevice)
from qutebrowser.utils import qtutils, utils
import overflow_test_cases
@pytest.mark.parametrize(['qversion', 'compiled', 'pyqt', 'version', 'exact',
                          'expected'], [
    # equal versions
    ('5.4.0', None, None, '5.4.0', False, True),
    ('5.4.0', None, None, '5.4.0', True, True),  # exact=True
    ('5.4.0', None, None, '5.4', True, True),  # without trailing 0

    # newer version installed
    ('5.4.1', None, None, '5.4', False, True),
    ('5.4.1', None, None, '5.4', True, False),  # exact=True

    # older version installed
    ('5.3.2', None, None, '5.4', False, False),
    ('5.3.0', None, None, '5.3.2', False, False),
    ('5.3.0', None, None, '5.3.2', True, False),  # exact=True

    # compiled=True
    # new Qt runtime, but compiled against older version
    ('5.4.0', '5.3.0', '5.4.0', '5.4.0', False, False),
    # new Qt runtime, compiled against new version, but old PyQt
    ('5.4.0', '5.4.0', '5.3.0', '5.4.0', False, False),
    # all up-to-date
    ('5.4.0', '5.4.0', '5.4.0', '5.4.0', False, True),
])
def test_version_check(monkeypatch, qversion, compiled, pyqt, version, exact,
                       expected):
    """Test for version_check().

    Args:
        monkeypatch: The pytest monkeypatch fixture.
        qversion: The version to set as fake qVersion().
        compiled: The value for QT_VERSION_STR (set compiled=False)
        pyqt: The value for PYQT_VERSION_STR (set compiled=False)
        version: The version to compare with.
        exact: Use exact comparing (==)
        expected: The expected result.
    """
    monkeypatch.setattr(qtutils, 'qVersion', lambda: qversion)
    if compiled is not None:
        # Fake the compile-time version constants as well.
        monkeypatch.setattr(qtutils, 'QT_VERSION_STR', compiled)
        monkeypatch.setattr(qtutils, 'PYQT_VERSION_STR', pyqt)
        compiled_arg = True
    else:
        compiled_arg = False

    actual = qtutils.version_check(version, exact, compiled=compiled_arg)
    assert actual == expected
def test_version_check_compiled_and_exact():
    # exact=True together with compiled=True is rejected as invalid usage.
    with pytest.raises(ValueError):
        qtutils.version_check('1.2.3', exact=True, compiled=True)
@pytest.mark.parametrize('version, is_new', [
    ('537.21', False),  # QtWebKit 5.1
    ('538.1', False),  # Qt 5.8
    ('602.1', True)  # new QtWebKit TP5, 5.212 Alpha
])
def test_is_new_qtwebkit(monkeypatch, version, is_new):
    # Patch the reported WebKit version and check the new/legacy classification.
    monkeypatch.setattr(qtutils, 'qWebKitVersion', lambda: version)
    assert qtutils.is_new_qtwebkit() == is_new
class TestCheckOverflow:

    """Test check_overflow.

    Test cases (ctype name, value[, replacement]) come from the
    overflow_test_cases helper module.
    """

    @pytest.mark.parametrize('ctype, val',
                             overflow_test_cases.good_values())
    def test_good_values(self, ctype, val):
        """Test values which are inside bounds."""
        qtutils.check_overflow(val, ctype)

    @pytest.mark.parametrize('ctype, val',
                             [(ctype, val) for (ctype, val, _) in
                              overflow_test_cases.bad_values()])
    def test_bad_values_fatal(self, ctype, val):
        """Test values which are outside bounds with fatal=True."""
        with pytest.raises(OverflowError):
            qtutils.check_overflow(val, ctype)

    @pytest.mark.parametrize('ctype, val, repl',
                             overflow_test_cases.bad_values())
    def test_bad_values_nonfatal(self, ctype, val, repl):
        """Test values which are outside bounds with fatal=False."""
        # Instead of raising, the value is clamped to the replacement.
        newval = qtutils.check_overflow(val, ctype, fatal=False)
        assert newval == repl
class QtObject:

    """Fake Qt object for test_ensure.

    Mimics the minimal interface (isValid/isNull/errorString) that
    qtutils.ensure_valid inspects.  (The original ``errorString`` condition
    contained a stray ``|`` corruption artifact in ``self``; repaired here.)
    """

    def __init__(self, valid=True, null=False, error=None):
        self._valid = valid
        self._null = null
        self._error = error

    def __repr__(self):
        return '<QtObject>'

    def errorString(self):
        """Get the fake error, or raise AttributeError if set to None."""
        if self._error is None:
            raise AttributeError
        else:
            return self._error

    def isValid(self):
        return self._valid

    def isNull(self):
        return self._null
@pytest.mark.parametrize('obj, raising, exc_reason, exc_str', [
    # good examples
    (QtObject(valid=True, null=True), False, None, None),
    (QtObject(valid=True, null=False), False, None, None),
    # bad examples
    (QtObject(valid=False, null=True), True, None, '<QtObject> is not valid'),
    (QtObject(valid=False, null=False), True, None, '<QtObject> is not valid'),
    (QtObject(valid=False, null=True, error='Test'), True, 'Test',
     '<QtObject> is not valid: Test'),
])
def test_ensure_valid(obj, raising, exc_reason, exc_str):
    """Test ensure_valid.

    Args:
        obj: The object to test with.
        raising: Whether QtValueError is expected to be raised.
        exc_reason: The expected .reason attribute of the exception.
        exc_str: The expected string of the exception.
    """
    if not raising:
        # Valid objects pass through without raising.
        qtutils.ensure_valid(obj)
        return
    with pytest.raises(qtutils.QtValueError) as excinfo:
        qtutils.ensure_valid(obj)
    assert excinfo.value.reason == exc_reason
    assert str(excinfo.value) == exc_str
@pytest.mark.parametrize('status, raising, message', [
    (QDataStream.Ok, False, None),
    (QDataStream.ReadPastEnd, True, "The data stream has read past the end of "
                                    "the data in the underlying device."),
    (QDataStream.ReadCorruptData, True, "The data stream has read corrupt "
                                        "data."),
    (QDataStream.WriteFailed, True, "The data stream cannot write to the "
                                    "underlying device."),
])
def test_check_qdatastream(status, raising, message):
    """Test check_qdatastream.

    Args:
        status: The status to set on the QDataStream we test with.
        raising: Whether check_qdatastream is expected to raise OSError.
        message: The expected exception string.
    """
    stream = QDataStream()
    stream.setStatus(status)
    if not raising:
        # An Ok stream must pass the check silently.
        qtutils.check_qdatastream(stream)
        return
    with pytest.raises(OSError, match=message):
        qtutils.check_qdatastream(stream)
def test_qdatastream_status_count():
    """Make sure no new members are added to QDataStream.Status."""
    members = [m for m in vars(QDataStream).values()
               if isinstance(m, QDataStream.Status)]
    assert len(members) == 4
@pytest.mark.parametrize('obj', [
    QPoint(23, 42),
    QUrl('http://www.qutebrowser.org/'),
])
def test_serialize(obj):
    """Test a serialize/deserialize round trip.

    Args:
        obj: The object to test with.
    """
    restored = type(obj)()
    qtutils.deserialize(qtutils.serialize(obj), restored)
    assert restored == obj
class TestSerializeStream:
"""Tests for serialize_stream and deserialize_stream."""
|
njsmith/codetrawl | codetrawl/dump.py | Python | gpl-3.0 | 587 | 0 | # This file is part of Codetrawl
# Copyright (C) 2015 Nathaniel Smith <njs@pobox.com>
# See file LICENSE.txt for license information.
"""Usage:
codetrawl.dump PATTERN FILE [FILE...]
where PATTERN is a Python format string like "{raw_url}", with allowed keys:
- service
- query
- repo
- path
- raw_url
- content
"""
import sys
import docopt
from .read import read_matches
if __name__ == "__main__":
    # Parse CLI arguments according to the usage string in the module docstring.
    args = docopt.docopt(__doc__)
    pattern = args["PATTERN"]
    for match in read_matches(args["FILE"]):
        # One formatted line per match on stdout.
        sys.stdout.write(pattern.format(**match) + "\n")
|
brunobergher/dotfiles | sublime/pymdownx/st3/pymdownx/magiclink.py | Python | mit | 10,950 | 0.002648 | """
Magic Link.
pymdownx.magiclink
An extension for Python Markdown.
Find http|ftp links and email address and turn them to actual links
MIT license.
Copyright (c) 2014 - 2017 Isaac Muse <isaacmuse@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
from markdown import Extension
from markdown.inlinepatterns import LinkPattern, Pattern
from markdown.treeprocessors import Treeprocessor
from markdown import util as md_util
import re
RE_MAIL = r'''(?xi)
(
(?<![-/\+@a-z\d_])(?:[-+a-z\d_]([-a-z\d_+]|\.(?!\.))*) # Local part
(?<!\.)@(?:[-a-z\d_]+\.) # @domain part start
(?:(?:[-a-z\d_]|(?<!\.)\.(?!\.))*)[a-z]\b # @domain.end (allow multiple dot names)
(?![-@]) # Don't allow last char to be followed by these
)
'''
RE_LINK = r'''(?xi)
(
(?:(?<=\b)|(?<=_))(?:
(?:ht|f)tps?://(?:(?:[^_\W][-\w]*(?:\.[-\w.]+)+)|localhost)| # (http|ftp)://
(?P<www>w{3}\.)[^_\W][-\w]*(?:\.[-\w.]+)+ # www.
)
/?[-\w.?,!'(){}\[\]/+&@%$#=:"|~;]* # url path, fragments, and query stuff
(?:[^_\W]|[-/#@$+=]) # allowed end chars
)
'''
RE_AUTOLINK = r'(?i)<((?:ht|f)tps?://[^>]*)>'
RE_REPO_LINK = re.compile(
r'''(?xi)
(?:
(?P<github>(?P<github_base>https://(?:w{3}\.)?github.com/(?P<github_user_repo>[^/]+/[^/]+))/
(?:issues/(?P<github_issue>\d+)/?|
pull/(?P<github_pull>\d+)/?|
commit/(?P<github_commit>[\da-f]+)/?)) |
(?P<bitbucket>(?P<bitbucket_base>https://(?:w{3}\.)?bitbucket.org/(?P<bitbucket_user_repo>[^/]+/[^/]+))/
(?:issues/(?P<bitbucket_issue>\d+)(?:/[^/]+)?/?|
pull-requests/(?P<bitbucket_pull>\d+)(?:/[^/]+(?:/diff)?)?/?|
commits/commit/(?P<bitbucket_commit>[\da-f]+)/?)) |
(?P<gitlab>(?P<gitlab_base>https://(?:w{3}\.)?gitlab.com/(?P<gitlab_user_repo>[^/]+/[^/]+))/
(?:issues/(?P<gitlab_issue>\d+)/?|
merge_requests/(?P<gitlab_pull>\d+)/?|
commit/(?P<gitlab_commit>[\da-f]+)/?))
)
'''
)
class MagicShortenerTreeprocessor(Treeprocessor):
    """Treeprocessor that finds repo issue and commit links and shortens them."""
    # Repo link types
    ISSUE = 0
    PULL = 1
    COMMIT = 2
    def shorten(self, link, my_repo, link_type, user_repo, value, url, hash_size):
        """Shorten url.

        Rewrites the text of *link* in place to the short form:
        ``user/repo@hash`` for commits, ``user/repo#number`` for issues and
        pull requests (the ``user/repo`` prefix is dropped when the link
        points at our own configured repo).

        NOTE(review): the ``url`` parameter is currently unused here.
        """
        if link_type is self.COMMIT:
            # user/repo@`hash`
            text = '' if my_repo else user_repo + '@'
            link.text = md_util.AtomicString(text)
            # Need a root with an element for things to get processed.
            # Send the `value` through and retrieve it from the p element.
            # Pop it off and add it to the link.
            el = md_util.etree.Element('div')
            p = md_util.etree.SubElement(el, 'p')
            # Backticks so the truncated hash is rendered as inline code.
            p.text = '`%s`' % value[0:hash_size]
            el = self.markdown.treeprocessors['inline'].run(el)
            p = list(el)[0]
            for child in list(p):
                link.append(child)
                p.remove(child)
        else:
            # user/repo#(issue|pull)
            link.text = ('#' + value) if my_repo else (user_repo + '#' + value)
    def get_provider(self, match):
        """Get the provider and hash size.

        NOTE(review): assumes *match* came from RE_REPO_LINK, so exactly one
        of the github/bitbucket/gitlab groups matched; otherwise the locals
        would be unbound (UnboundLocalError).
        """
        # Set provider specific variables
        if match.group('github'):
            provider = 'github'
            hash_size = 7
        elif match.group('bitbucket'):
            provider = 'bitbucket'
            hash_size = 7
        elif match.group('gitlab'):
            provider = 'gitlab'
            hash_size = 8
        return provider, hash_size
    def get_type(self, provider, match):
        """Get the link type.

        Returns a (value, link_type) tuple where value is the commit hash or
        the issue/pull number and link_type is one of COMMIT/PULL/ISSUE.
        """
        # Gather info about link type
        if match.group(provider + '_commit') is not None:
            value = match.group(provider + '_commit')
            link_type = self.COMMIT
        elif match.group(provider + '_pull') is not None:
            value = match.group(provider + '_pull')
            link_type = self.PULL
        else:
            value = match.group(provider + '_issue')
            link_type = self.ISSUE
        return value, link_type
    def is_my_repo(self, provider, match):
        """Check if link is from our specified repo."""
        # See if these links are from the specified repo.
        my_repo = match.group(provider + '_base') == self.base
        if not my_repo and self.hide_protocol:
            # base_repo_url may have been configured without the scheme.
            my_repo = match.group(provider + '_base') == ('https://' + self.base)
        return my_repo
    def run(self, root):
        """Shorten popular git repository links."""
        self.base = self.config.get('base_repo_url', '').rstrip('/')
        self.hide_protocol = self.config['hide_protocol']
        links = root.iter('a')
        for link in links:
            has_child = len(list(link))
            is_magic = link.attrib.get('magiclink')
            href = link.attrib.get('href', '')
            text = link.text
            if is_magic:
                # Consume the marker set by MagiclinkPattern.
                del link.attrib['magiclink']
            # We want a normal link. No subelements embedded in it, just a normal string.
            if has_child or not text:  # pragma: no cover
                continue
            # Make sure the text matches the href. If needed, add back protocol to be sure.
            # Not all links will pass through MagicLink, so we try both with and without protocol.
            if text == href or (is_magic and self.hide_protocol and ('https://' + text) == href):
                m = RE_REPO_LINK.match(href)
                if m:
                    provider, hash_size = self.get_provider(m)
                    my_repo = self.is_my_repo(provider, m)
                    value, link_type = self.get_type(provider, m)
                    # All right, everything set, let's shorten.
                    self.shorten(
                        link,
                        my_repo,
                        link_type,
                        m.group(provider + '_user_repo'),
                        value,
                        href,
                        hash_size
                    )
        return root
class MagiclinkPattern(LinkPattern):
    """Convert http, ftp, and www links to clickable links."""

    def handleMatch(self, m):
        """Build an <a> element for a matched URL."""
        shorten = self.config.get('repo_url_shortener', False)
        el = md_util.etree.Element("a")
        el.text = md_util.AtomicString(m.group(2))
        if m.group("www"):
            # Bare "www." matches carry no scheme; prepend one for the href.
            href = "http://%s" % m.group(2)
        else:
            href = m.group(2)
            if self.config['hide_protocol']:
                # Strip "scheme://" from the *displayed* text only.  This must
                # stay in the non-www branch: "www." matches contain no "://",
                # so find() would return -1 and text[2:] would mangle the label
                # (e.g. "www.example.com" -> "w.example.com").
                el.text = md_util.AtomicString(el.text[el.text.find("://") + 3:])
        if shorten:
            # Mark the element for MagicShortenerTreeprocessor post-processing.
            el.set('magiclink', '1')
        el.set("href", self.sanitize_url(self.unescape(href.strip())))
        return el
class MagiclinkAutoPattern(Pattern):
"""Return a link Element given an autolink `<http://example/com>`."""
def handleMatch(s |
faribas/RMG-Java | source/cclib/parser/mm4parser.py | Python | mit | 11,929 | 0.00964 | """
gmagoon 05/03/10: new class for MM4 parsing, based on mopacparser.py, which, in turn, is based on gaussianparser.py from cclib, described below:
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 814 $"
#import re
import numpy
import math
import utils
import logfileparser
def symbol2int(symbol):
    """Map an element symbol to its atomic number via the periodic table."""
    return utils.PeriodicTable().number[symbol]
class MM4(logfileparser.Logfile):
"""An MM4 output file."""
    def __init__(self, *args, **kwargs):
        """Initialise the parser, tagging log output with the "MM4" name."""
        # Call the __init__ method of the superclass
        super(MM4, self).__init__(logname="MM4", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "MM4 log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'MM4("%s")' % (self.filename)
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
# Number of atoms.
# Example: THE COORDINATES OF 20 ATOMS ARE READ IN.
if line[0:28] == ' THE COORDINATES OF':
self.updateprogress(inputfile, "Attributes", self.fupdate)
natom = int(line.split()[-5]) #fifth to last component should be number of atoms
if hasattr(self, "natom"):
assert self.natom == natom
else:
self.natom = natom
# Extract the atomic numbers and coordinates from the optimized (final) geometry
# Example:
# FINAL ATOMIC COORDINATE
# ATOM X Y Z TYPE
# C( 1) -3.21470 -0.22058 0.00000 ( 1)
# H( 2) -3.30991 -0.87175 0.89724 ( 5)
# H( 3) -3.30991 -0.87174 -0.89724 ( 5)
# H( 4) -4.08456 0.47380 0.00000 ( 5)
# C( 5) -1.88672 0.54893 0.00000 ( 1)
# H( 6) -1.84759 1.21197 -0.89488 ( 5)
# H( 7) -1.84759 1.21197 0.89488 ( 5)
# C( 8) -0.66560 -0.38447 0.00000 ( 1)
# H( 9) -0.70910 -1.04707 -0.89471 ( 5)
# H( 10) -0.70910 -1.04707 0.89471 ( 5)
# C( 11) 0.66560 0.38447 0.00000 ( 1)
# H( 12) 0.70910 1.04707 0.89471 ( 5)
# H( 13) 0.70910 1.04707 -0.89471 ( 5)
# C( 14) 1.88672 -0.54893 0.00000 ( 1)
# H( 15) 1.84759 -1.21197 -0.89488 ( 5)
# H( 16) 1.84759 -1.21197 0.89488 ( 5)
# C( 17) 3.21470 0.22058 0.00000 ( 1)
# H( 18) 3.30991 0.87174 0.89724 ( 5)
# H( 19) 4.08456 -0.47380 0.00000 ( 5)
# H( 20) 3.30991 0.87175 -0.89724 ( 5)
if line[0:29] == ' FINAL ATOMIC COORDINATE':
self.updateprogress(inputfile, "Attributes", self.cupdate)
self.inputcoords = []
self.inputatoms = []
headerline = inputfile.next()
atomcoords = []
line = inputfile.next()
while len(line.split()) > 0:
broken = line.split()
self.inputatoms.append(symbol2int(line[0:10].strip()))
xc = float(line[17:29])
yc = float(line[29:41])
zc = float(line[41:53])
atomcoords.append([xc,yc,zc])
line = inputfile.next()
self.inputcoords.append(atomcoords)
if not hasattr(self, "atomnos"):
self.atomnos = numpy.array(self.inputatoms, 'i')
if not hasattr(self, "natom"):
self.natom = len(self.atomnos)
#read energy (in kcal/mol, converted to eV)
# Example: HEAT OF FORMATION (HFN) AT 298.2 K = -42.51 KCAL/MOLE
if line[0:31] == ' HEAT OF FORMATION (HFN) AT':
if not hasattr(self, "scfenergies"):
self.scfenergies = []
self.scfenergies.append(utils.convertor(self.float(line.split()[-2])/627.5095, "hartree", "eV")) #note conversion from kcal/mol to hartree
#molecular mass parsing (units will be amu); note that this can occur multiple times in the file, but all values should be the same
| #Example: FORMULA WEIGHT : 86.112
if line[0:33] == ' FORMULA WEIGHT :':
self.updateprogress(inputfile, "Attributes", self.fupdate)
molmass = self.float(line.split()[-1])
if hasattr(self, "molmass"):
assert self.molmass == molmass #check that subs | equent occurences match the original value
else:
self.molmass = molmass
#rotational constants (converted to GHZ)
#Example:
# THE MOMENTS OF INERTIA CALCULATED FROM R(g), R(z) VALUES
# (also from R(e), R(alpha), R(s) VALUES)
#
# Note: (1) All calculations are based on principle isotopes.
# (2) R(z) values include harmonic vibration (Coriolis)
# contribution indicated in parentheses.
#
#
# (1) UNIT = 10**(-39) GM*CM**2
#
# IX IY IZ
#
# R(e) 5.7724 73.4297 76.0735
# R(z) 5.7221(-0.0518) 74.0311(-0.0285) 76.7102(-0.0064)
#
# (2) UNIT = AU A**2
#
# IX IY IZ
#
# R(e) 34.7661 442.2527 458.1757
# R(z) 34.4633(-0.3117) 445.8746(-0.1714) 462.0104(-0.0385)
#moments of inertia converted into rotational constants via rot cons= h/(8*Pi^2*I)
#we will use the equilibrium values (R(e)) in units of 10**-39 GM*CM**2 (these units are less precise (fewer digits) than AU A**2 units but it is simpler as it doesn't require use of Avogadro's number
#***even R(e) may include temperature dependent effects, though, and maybe the one I actually want is r(mm4) (not reported)
if line[0:33] == ' (1) UNIT = 10**(-39) GM*CM**2':
dummyline = inputfile.next();
dummyline = inputfile.next();
dummyline = inputfile.next();
rotinfo=inputfile.next();
if not hasattr(self, "rotcons"):
self.rotcons = []
broken = rotinfo.split()
h = 6.62606896E3 #Planck constant in 10^-37 J-s = 10^-37 kg m^2/s cf. http://physics.nist.gov/cgi-bin/cuu/Value?h#mid
a = h/(8*math.pi*math.pi*float(broken[1]))
b = h/(8*math.pi*math.pi*float(broken[2]))
c = h/(8*math.pi*math.pi*float(broken[3]))
self.rotcons.append([a, b, c])
# Start of the IR/Raman frequency section.
#Example:
#0 FUNDAMENTAL NORMAL VIBRATIONAL FREQUENCIES
# ( THEORETICALLY 54 VALUES )
#
# Frequency : in 1/cm
# A(i) : IR intensity (vs,s,m,w,vw,-- or in 10**6 cm/mole)
# A(i) = -- : IR inactive
#
#
# no Frequency Symmetry A(i)
#
# 1. 2969.6 (Bu ) s
# 2. 2969.6 (Bu ) w
# 3. 2967.6 (Bu ) w
# 4. 2967.6 (Bu ) s
# 5. 2931.2 (Au ) vs
# 6. 2927.8 (Bg ) --
# 7. 2924.9 (Au ) m
# 8. 2923.6 (Bg ) --
# 9. 2885.8 (Ag ) --
# 10. 2883.9 (Bu ) w
# 11. 2879.8 (Ag ) --
# 12. 2874.6 (Bu ) w
# 13. 2869.6 (Ag ) |
weichweich/Pi-Timeswitch | Flask-Server/timeswitch/auth/schema.py | Python | mit | 1,110 | 0.005405 | import logging
from marshmallow import ValidationError, post_load
from marshmallow_jsonapi import Schema, fields
from timeswitch.auth.dao import User
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record.

    NOTE(review): the stdlib provides logging.NullHandler (Python 2.7/3.1+);
    this local class could be replaced by it.
    """
    def emit(self, record):
        # Intentionally a no-op: records sent to this handler vanish.
        pass
logging.getLogger(__name__).addHandler(NullHandler())
LOGGER = logging.getLogger(__name__)
class AppError(Exception):
    """Generic application-level error."""
    pass
def dasherize(text):
    """Return *text* with every underscore replaced by a dash."""
    return '-'.join(text.split('_'))
class UserSch | ema(Schema):
id = fields.String(dump_only=True, required=True)
name = fields.String(required=True)
password = fields.String(load_only=True, required=False, attribute="password_clear")
new_password = fields.String(load_only=True, required=False)
email = fields.Email(required=False)
last_loggin = fields.String(required=False)
privilege = fields.String(required=False)
@post_load
def make_user(self, data):
return User(**data)
def handle_error(self, exc, data):
raise ValidationError('An error occurred with input: {0} \n {1}'.format(data, str(exc)))
class Meta:
type_ = 'users'
# inflect = dasherize
|
openstack/octavia | octavia/tests/unit/amphorae/backends/utils/test_interface.py | Python | apache-2.0 | 33,169 | 0 | # Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import socket
from unittest import mock
import pyroute2
from octavia.amphorae.backends.utils import interface
from octavia.amphorae.backends.utils import interface_file
from octavia.common import constants as consts
from octavia.common import exceptions
from octavia.tests.common import utils as test_utils
import octavia.tests.unit.base as base
class TestInterface(base.TestCase):
    @mock.patch('os.listdir')
    @mock.patch('octavia.amphorae.backends.utils.interface_file.'
                'InterfaceFile.get_directory')
    def test_interface_file_list(self, mock_get_directory, mock_listdir):
        """Only .json files in the config directory are listed."""
        mock_get_directory.return_value = consts.AMP_NET_DIR_TEMPLATE
        ifaces = ('eth0', 'eth7', 'eth8')
        mock_listdir.return_value = [
            "{}.json".format(iface)
            for iface in ifaces
        ]
        # A non-.json entry that must be filtered out.
        mock_listdir.return_value.extend(["invalidfile"])
        controller = interface.InterfaceController()
        r = controller.interface_file_list()
        config_file_list = list(r)
        for iface in ifaces:
            f = os.path.join(consts.AMP_NET_DIR_TEMPLATE,
                             "{}.json".format(iface))
            self.assertIn(f, config_file_list)
        # unsupported file
        f = os.path.join(consts.AMP_NET_DIR_TEMPLATE,
                         "invalidfile")
        self.assertNotIn(f, config_file_list)
        # non existing file
        f = os.path.join(consts.AMP_NET_DIR_TEMPLATE,
                         "eth2.json")
        self.assertNotIn(f, config_file_list)
    @mock.patch('os.listdir')
    @mock.patch('octavia.amphorae.backends.utils.interface_file.'
                'InterfaceFile.get_directory')
    def test_list(self, mock_get_directory, mock_listdir):
        """A JSON interface file on disk is parsed into a full Interface."""
        mock_get_directory.return_value = consts.AMP_NET_DIR_TEMPLATE
        mock_listdir.return_value = ["fakeiface.json"]
        # Raw JSON contents served through OpenFixture below.
        content = ('{\n'
                   '"addresses": [\n'
                   '{"address": "10.0.0.2",\n'
                   '"prefixlen": 24}\n'
                   '],\n'
                   '"mtu": 1450,\n'
                   '"name": "eth1",\n'
                   '"routes": [\n'
                   '{"dst": "0.0.0.0/0",\n'
                   '"gateway": "10.0.0.1"},\n'
                   '{"dst": "10.11.0.0/16",\n'
                   '"gateway": "10.0.0.24"}\n'
                   '],\n'
                   '"rules": [\n'
                   '{"src": "10.0.0.2",\n'
                   '"src_len": 32,\n'
                   '"table": 100}\n'
                   '],\n'
                   '"scripts": {\n'
                   '"up": [\n'
                   '{"command": "up-script"}],\n'
                   '"down": [\n'
                   '{"command": "down-script"}]\n'
                   '}}\n')
        filename = os.path.join(consts.AMP_NET_DIR_TEMPLATE,
                                "fakeiface.json")
        self.useFixture(
            test_utils.OpenFixture(filename,
                                   contents=content))
        controller = interface.InterfaceController()
        ifaces = controller.list()
        self.assertIn("eth1", ifaces)
        iface = ifaces["eth1"]
        # Expected parse of the JSON above.
        expected_dict = {
            consts.NAME: "eth1",
            consts.MTU: 1450,
            consts.ADDRESSES: [{
                consts.ADDRESS: "10.0.0.2",
                consts.PREFIXLEN: 24
            }],
            consts.ROUTES: [{
                consts.DST: "0.0.0.0/0",
                consts.GATEWAY: "10.0.0.1"
            }, {
                consts.DST: "10.11.0.0/16",
                consts.GATEWAY: "10.0.0.24"
            }],
            consts.RULES: [{
                consts.SRC: "10.0.0.2",
                consts.SRC_LEN: 32,
                consts.TABLE: 100
            }],
            consts.SCRIPTS: {
                consts.IFACE_UP: [{
                    consts.COMMAND: "up-script"
                }],
                consts.IFACE_DOWN: [{
                    consts.COMMAND: "down-script"
                }]
            }
        }
        self.assertEqual(expected_dict[consts.NAME], iface.name)
        self.assertEqual(expected_dict[consts.MTU], iface.mtu)
        test_utils.assert_address_lists_equal(
            self, expected_dict[consts.ADDRESSES], iface.addresses)
        test_utils.assert_rule_lists_equal(
            self, expected_dict[consts.RULES], iface.rules)
        test_utils.assert_script_lists_equal(
            self, expected_dict[consts.SCRIPTS], iface.scripts)
def test__ipr_command(self):
mock_ipr_addr = mock.MagicMock()
controller = interface.InterfaceController()
contr | oller._ipr_command(mock_ipr_addr,
controller.ADD,
arg1=1, arg2=2)
mock_ipr_addr.assert_called_once_with('add', arg1=1, arg2=2)
    def test__ipr_command_add_eexist(self):
        """An EEXIST NetlinkError on 'add' is swallowed (no exception)."""
        mock_ipr_addr = mock.MagicMock()
        mock_ipr_addr.side_effect = [
            pyroute2.NetlinkError(code=errno.EEXIST)
        ]
        controller = interface.InterfaceController()
        controller._ipr_command(mock_ipr_addr,
                                controller.ADD,
                                arg1=1, arg2=2)
        mock_ipr_addr.assert_called_once_with('add', arg1=1, arg2=2)
    def test__ipr_command_add_retry(self):
        """EINVAL errors are retried until the call finally succeeds."""
        mock_ipr_addr = mock.MagicMock()
        # Three transient failures followed by success on the fourth call.
        mock_ipr_addr.side_effect = [
            pyroute2.NetlinkError(code=errno.EINVAL),
            pyroute2.NetlinkError(code=errno.EINVAL),
            pyroute2.NetlinkError(code=errno.EINVAL),
            None
        ]
        controller = interface.InterfaceController()
        controller._ipr_command(mock_ipr_addr,
                                controller.ADD,
                                retry_on_invalid_argument=True,
                                retry_interval=0,
                                arg1=1, arg2=2)
        mock_ipr_addr.assert_has_calls([
            mock.call('add', arg1=1, arg2=2),
            mock.call('add', arg1=1, arg2=2),
            mock.call('add', arg1=1, arg2=2),
            mock.call('add', arg1=1, arg2=2)])
    def test__ipr_command_add_einval_failed(self):
        """Persistent EINVAL exhausts max_retries and raises."""
        mock_ipr_addr = mock.MagicMock()
        mock_ipr_addr.__name__ = "addr"
        # One more failure than max_retries so the call can never succeed.
        mock_ipr_addr.side_effect = [
            pyroute2.NetlinkError(code=errno.EINVAL)
        ] * 21
        controller = interface.InterfaceController()
        self.assertRaises(exceptions.AmphoraNetworkConfigException,
                          controller._ipr_command,
                          mock_ipr_addr,
                          controller.ADD,
                          retry_on_invalid_argument=True,
                          max_retries=20,
                          retry_interval=0,
                          arg1=1, arg2=2)
        mock_ipr_addr.assert_has_calls([
            mock.call('add', arg1=1, arg2=2)
        ] * 20)
def test__ipr_command_add_failed(self):
mock_ipr_addr = mock.MagicMock()
mock_ipr_addr.__name__ = "addr"
mock_ipr_addr.side_effect = [
pyroute2.NetlinkError(code=errno.ENOENT)
]
controller = interface.InterfaceController()
self.assertRaises(exceptions.AmphoraNetworkConfigException,
controller._ipr_command,
mock_ipr_addr,
controller.ADD,
retry_on_invalid_argument=True,
max_retries=20,
retry_interval= |
ellipses/Yaksha | src/commands/voting.py | Python | mit | 7,274 | 0.000825 | #!/usr/bin/python
from commands.utilities import register
import asyncio
import re
class Voting():
    """Chat-bot command handler implementing per-channel timed votes.

    State is kept in self.active_votes, one entry per channel:
        channel -> (run_vote coroutine, {option: count}, [users who voted])
    """
    def __init__(self, config=None):
        # regex to match a number at the start of the message.
        # Being a float is optional.
        # NOTE(review): the pattern also matches a bare "--" (both groups are
        # optional), in which case handle_input's float('') raises ValueError.
        self.length_re = r'--((\d*)?(\.\d*)?)'
        # regex to match and capture vote options in square bracket.
        self.options_re = r'\[(.+)\]'
        # Default vote duration in minutes.
        self.vote_length = 0.5
        self.default_options = ['yes', 'no']
        self.active_votes = {}
    def apply_regex(self, msg, regex):
        '''
        Applies the regex and removes the matched
        elements from the message.
        Returns a (remaining_msg, matched_text) tuple, or False when the
        regex does not match.
        '''
        result = re.search(regex, msg)
        if result:
            msg = re.sub(regex, '', msg).strip()
            return msg, result.group(0)
        else:
            return False
    def handle_input(self, msg):
        '''
        Parses the supplied message to determine the vote
        length and supplied parameters (if any).
        Expected vote format:
            !vote[--time] String of the vote [[parameter1, parm2]]
        Returns (question, vote_length, {option: 0}) or False on bad input.
        '''
        # Check if the user supplied a length
        regex_result = self.apply_regex(msg, self.length_re)
        if regex_result:
            msg, matched_length = regex_result
            # start at the second index to avoid the -- at the start
            # of the time parameter.
            vote_length = float(matched_length[2:])
        else:
            vote_length = self.vote_length
        # Check if the user supplied extra parameters
        regex_result = self.apply_regex(msg, self.options_re)
        if regex_result:
            msg, extra_options = regex_result
            # remove square brackets, split on comma and strip whitespace
            extra_options = extra_options.replace('[', '').replace(']', '')
            options = extra_options.lower().split(',')
            options = [word.strip() for word in options]
            # Storing length in a variable here to later compare
            # after forming a dictionary to ensure there were no
            # duplicates.
            option_len = len(options)
            if len(options) < 2:
                return False
            # Create a dictionary with the voter counts set to 0
            values = [0 for option in options]
            vote_options = dict(zip(options, values))
            # Make sure the options aren't repeated by comparing length
            # before the dictionary was created.
            if option_len != len(vote_options):
                return False
        else:
            values = [0 for index in self.default_options]
            vote_options = dict(zip(self.default_options, values))
        # What remains of the msg should be the vote question.
        if len(msg.strip()) < 1:
            return False
        return msg, vote_length, vote_options
    async def send_start_message(self, client, channel, vote_length, msg):
        '''
        Simple function that sends a message that a
        vote has started asynchronously.
        '''
        vote_parms = self.active_votes[channel][1]
        start_string = 'Starting vote ```%s``` with options ' % msg
        # One '%s' placeholder per option, filled in below.
        param_string = ' '.join(['%s' for index in range(len(vote_parms))])
        start_string += '[ ' + param_string % tuple(vote_parms.keys()) + ' ]'
        start_string += ' For %s minutes.' % vote_length
        await channel.send(start_string)
    async def end_vote(self, client, channel, msg):
        '''
        Counts the votes to determine the winner and sends
        the finish message. Cant simply check the max value
        because there might be a draw. Should probably break
        it up.
        '''
        vote_parms = self.active_votes[channel][1]
        end_string = 'Voting for ```%s``` completed.' % msg
        max_value = max(vote_parms.values())
        # Every option tied at the top count wins.
        winners = [key for key, value in vote_parms.items()
                   if value == max_value]
        if len(winners) == 1:
            end_string += ' The winner is **%s**' % tuple(winners)
        else:
            winner_string = ' '.join(['%s' for index in range(len(winners))])
            end_string += ' The winners are [ **' + winner_string % tuple(winners) + '** ]'
        await channel.send(end_string)
    async def run_vote(self, client, channel, vote_length, msg):
        '''
        Simple async function that sleeps for the vote length
        and calls the start and end voting functions.
        '''
        await self.send_start_message(client, channel, vote_length, msg)
        # sleep for the vote length.
        await asyncio.sleep(vote_length * 60)
        # Count the votes and send the ending message
        await self.end_vote(client, channel, msg)
        # Delete the dictionary entry now that the vote is finished.
        del self.active_votes[channel]
    @register('!vote')
    async def start_vote(self, msg, user, channel, client, *args, **kwargs):
        '''
        Main function that handles the vote function. Makes sure
        that only vote is going at a time in a channel.
        Calls from a channel that has a vote going on are
        considered to be a vote for the ongoing vote.
        dict entry: active_votes(client, {option: count}, [voters])
        '''
        if channel not in self.active_votes:
            processed_input = self.handle_input(msg)
            if processed_input:
                msg, vote_len, params = processed_input
                # Save a reference to the sleep function, the valid params
                # for the specific vote and an empty list which will contain
                # the name of users who have already voted.
                self.active_votes[channel] = (self.run_vote, params, [])
                # print('starting vote with ', params)
                # Start the actual vote.
                await self.active_votes[channel][0](client, channel, vote_len, msg)
            else:
                return ('Invalid format for starting a vote. The correct format is '
                        '```!vote[--time] Vote question [vote options]``` '
                        '**eg:** !vote start a vote on some topic? [yes, no, maybe]')
        else:
            # An active vote already exists for this channel.
            # First check if the user has already voted in it.
            if user in self.active_votes[channel][2]:
                return ("Stop attempting electoral fraud %s, "
                        "you've already voted") % user
            else:
                # Check if the supplied argument is a valid vote option.
                vote_option = msg.lower().strip()
                valid_options = self.active_votes[channel][1]
                if vote_option in valid_options:
                    self.active_votes[channel][1][vote_option] += 1
                    # Add the user to the list of users.
                    self.active_votes[channel][2].append(user)
                    # return 'Increasing %s vote :)' % vote_option
                else:
                    # NOTE(review): this interpolates the *user* name, not the
                    # rejected option; possibly meant to be vote_option.
                    error_str = 'Invalid vote option %s. ' % user
                    error_str += 'The options are ' + str(tuple(valid_options.keys()))
                    return error_str
|
QuLogic/meson | mesonbuild/scripts/clangformat.py | Python | apache-2.0 | 3,260 | 0.002147 | # Copyright 2018 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import subprocess
import itertools
import fnmatch
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor
from ..environment import detect_clangformat
from ..compilers import lang_suffixes
import typing as T
def parse_pattern_file(fname: Path) -> T.List[str]:
    """Read a clang-format include/ignore pattern file.

    Returns the non-empty, non-comment patterns; a missing file yields an
    empty list.
    """
    try:
        text = fname.read_text(encoding='utf-8')
    except FileNotFoundError:
        return []
    candidates = (line.strip() for line in text.splitlines())
    return [p for p in candidates if p and not p.startswith('#')]
def run_clang_format(exelist: T.List[str], fname: Path, check: bool) -> subprocess.CompletedProcess:
    """Run clang-format in-place on *fname*.

    With check=True the file is restored to its original contents after
    formatting and a returncode of 1 is reported -- but only when
    clang-format actually changed the file.  Previously the restore and the
    forced returncode happened unconditionally, so --check failed (and
    rewrote the file) even for already-formatted sources.
    """
    if check:
        original = fname.read_bytes()
    before = fname.stat().st_mtime
    args = ['-style=file', '-i', str(fname)]
    ret = subprocess.run(exelist + args)
    after = fname.stat().st_mtime
    # clang-format rewrites the file in place; an mtime change means it
    # reformatted something.
    if before != after:
        print('File reformatted: ', fname)
        if check:
            # Restore the original if only checking.
            fname.write_bytes(original)
            ret.returncode = 1
    return ret
def clangformat(exelist: T.List[str], srcdir: Path, builddir: Path, check: bool) -> int:
    """Format (or check) every C/C++ source under srcdir.

    File selection honours .clang-format-include / .clang-format-ignore in
    srcdir; the build directory is always ignored.  Returns the worst
    returncode of all clang-format runs (0 when no file matched).
    """
    patterns = parse_pattern_file(srcdir / '.clang-format-include')
    if not patterns:
        patterns = ['**/*']
    globs = [srcdir.glob(p) for p in patterns]
    patterns = parse_pattern_file(srcdir / '.clang-format-ignore')
    ignore = [str(builddir / '*')]
    ignore.extend([str(srcdir / p) for p in patterns])
    # C and C++ suffixes, plus bare 'h', each normalised to '.ext' form.
    suffixes = set(lang_suffixes['c']).union(set(lang_suffixes['cpp']))
    suffixes.add('h')
    suffixes = set([f'.{s}' for s in suffixes])
    futures = []
    returncode = 0
    with ThreadPoolExecutor() as e:
        for f in itertools.chain(*globs):
            strf = str(f)
            if f.is_dir() or f.suffix not in suffixes or \
                any(fnmatch.fnmatch(strf, i) for i in ignore):
                continue
            futures.append(e.submit(run_clang_format, exelist, f, check))
        # default guards against ValueError from max() when nothing matched.
        returncode = max([x.result().returncode for x in futures],
                         default=returncode)
    return returncode
def run(args: T.List[str]) -> int:
    """Entry point: parse CLI args, locate clang-format and run it.

    Returns 1 when clang-format cannot be found, otherwise the result of
    clangformat().
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--check', action='store_true')
    parser.add_argument('sourcedir')
    parser.add_argument('builddir')
    options = parser.parse_args(args)
    srcdir = Path(options.sourcedir)
    builddir = Path(options.builddir)
    exelist = detect_clangformat()
    if not exelist:
        # exelist is empty here, so the quoted name prints as "".
        print('Could not execute clang-format "%s"' % ' '.join(exelist))
        return 1
    return clangformat(exelist, srcdir, builddir, options.check)
|
amagdas/eve-elastic | eve_elastic/elastic.py | Python | gpl-3.0 | 13,856 | 0.001371 |
import ast
import json
import arrow
import elasticsearch
from bson import ObjectId
from flask import request
from eve.utils import config
from eve.io.base import DataLayer
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
def parse_date(date_str):
    """Parse an elastic datetime string into a ``datetime`` object."""
    try:
        parsed = arrow.get(date_str)
    except TypeError:
        # Some payloads wrap the timestamp in a single-element sequence.
        parsed = arrow.get(date_str[0])
    return parsed.datetime
def get_dates(schema):
    """Return the names of all datetime fields for the given schema.

    The eve bookkeeping fields (last-updated / date-created) are always
    included, followed by every schema field declared as ``datetime``.
    """
    declared = [field for field, field_schema in schema.items()
                if field_schema['type'] == 'datetime']
    return [config.LAST_UPDATED, config.DATE_CREATED] + declared
def format_doc(hit, schema, dates):
    """Format an elastic hit so it matches the resource schema.

    Takes the hit ``_source`` payload, backfills id and type from the hit
    envelope, and converts every known date field to a datetime.
    """
    doc = hit.get('_source', {})
    doc.setdefault(config.ID_FIELD, hit.get('_id'))
    doc.setdefault('_type', hit.get('_type'))
    present = (key for key in dates if key in doc)
    for key in present:
        doc[key] = parse_date(doc[key])
    return doc
def noop():
    """Do nothing; used as the default filter callback."""
    return None
def is_elastic(datasource):
    """Detect if the given resource datasource uses elastic as a backend."""
    return 'elastic' in (datasource.get('backend'),
                         datasource.get('search_backend'))
class ElasticJSONSerializer(elasticsearch.JSONSerializer):
    """Customize the JSON serializer used in Elastic.

    Extends the elasticsearch client serializer so mongo ObjectId values
    can be embedded in request bodies.
    """
    def default(self, value):
        """Convert mongo.ObjectId to its string form; defer otherwise."""
        if isinstance(value, ObjectId):
            return str(value)
        return super(ElasticJSONSerializer, self).default(value)
class ElasticCursor(object):
    """Cursor-like wrapper around an elasticsearch search response.

    Mimics the subset of a mongo cursor API that eve relies on: item
    access, ``first`` and ``count``, plus ``extra`` for copying facet
    and aggregation sections into an outgoing response.
    """

    # Stand-in response used when no hits were supplied at all.
    no_hits = {'hits': {'total': 0, 'hits': []}}

    def __init__(self, hits=None, docs=None):
        """Store the raw hits and the already-parsed docs."""
        self.hits = hits or self.no_hits
        self.docs = docs or []

    def __getitem__(self, key):
        return self.docs[key]

    def first(self):
        """Return the first doc, or None when there are no docs."""
        if not self.docs:
            return None
        return self.docs[0]

    def count(self, **kwargs):
        """Return the total hit count reported by elasticsearch."""
        return int(self.hits['hits']['total'])

    def extra(self, response):
        """Copy facet/aggregation sections of the hits into *response*."""
        for source, target in (('facets', '_facets'),
                               ('aggregations', '_aggregations')):
            if source in self.hits:
                response[target] = self.hits[source]
def set_filters(query, base_filters):
    """Merge all filters into a single 'and' filter on the filtered query.

    :param query: elastic query being constructed (modified in place)
    :param base_filters: filters set outside of the query itself
        (eg. resource config, sub resource lookup); None entries are skipped
    """
    filters = [f for f in base_filters if f is not None]
    existing = query['query']['filtered'].get('filter')
    if existing is not None:
        # Flatten an existing 'and' filter instead of nesting it.
        if 'and' in existing:
            filters.extend(existing['and'])
        else:
            filters.append(existing)
    if filters:
        query['query']['filtered']['filter'] = {'and': filters}
def set_sort(query, sort):
    """Set the sort section of *query* from (field, direction) pairs.

    :param query: elastic query being constructed (modified in place)
    :param sort: sequence of ``(key, sortdir)`` tuples; a positive
        ``sortdir`` means ascending, anything else descending
    """
    # dict literal instead of the old dict([(key, ...)]) construction.
    query['sort'] = [{key: 'asc' if sortdir > 0 else 'desc'}
                     for (key, sortdir) in sort]
def get_es(url):
    """Build an Elasticsearch client for *url* using the custom serializer."""
    o = urlparse(url)
    es = elasticsearch.Elasticsearch(hosts=[{'host': o.hostname, 'port': o.port}])
    es.transport.serializer = ElasticJSONSerializer()
    return es
def get_indices(es):
    """Return the indices management client for the given ES client."""
    return elasticsearch.client.IndicesClient(es)
class Elastic(DataLayer):
    """ElasticSearch data layer for eve."""

    # Casters applied to incoming values, keyed by cerberus schema type.
    serializers = {
        'integer': int,
        'datetime': parse_date,
        'objectid': ObjectId,
    }
    def init_app(self, app):
        """Wire the data layer into *app*: connect, create index, map schema."""
        app.config.setdefault('ELASTICSEARCH_URL', 'http://localhost:9200/')
        app.config.setdefault('ELASTICSEARCH_INDEX', 'eve')
        self.index = app.config['ELASTICSEARCH_INDEX']
        self.es = get_es(app.config['ELASTICSEARCH_URL'])
        self.create_index(self.index)
        self.put_mapping(app)
def _get_field_mapping(self, schema):
"""Get mapping for given field schema."""
if 'mapping' in schema:
return schema['mapping']
elif schema['type'] == 'datetime':
return {'type': 'date'}
elif schema['type'] == 'string' and schema.get('unique'):
return {'type': 'string', 'index': 'not_analyzed'}
def create_index(self, index=None):
if index is None:
index = self.index
try:
get_indices(self.es).create(self.index)
except elasticsearch.TransportError:
pass
    def put_mapping(self, app):
        """Put mapping for elasticsearch for current schema.

        It's not called automatically now, but rather left for user to call
        it whenever it makes sense.
        """
        indices = get_indices(self.es)
        for resource, resource_config in app.config['DOMAIN'].items():
            datasource = resource_config.get('datasource', {})
            if not is_elastic(datasource):
                continue
            if datasource.get('source', resource) != resource:  # only put mapping for core types
                continue
            properties = {}
            # eve bookkeeping fields are always mapped as dates.
            properties[config.DATE_CREATED] = self._get_field_mapping({'type': 'datetime'})
            properties[config.LAST_UPDATED] = self._get_field_mapping({'type': 'datetime'})
            for field, schema in resource_config['schema'].items():
                field_mapping = self._get_field_mapping(schema)
                if field_mapping:
                    properties[field] = field_mapping
            mapping = {'properties': properties}
            indices.put_mapping(index=self.index, doc_type=resource, body=mapping, ignore_conflicts=True)
    def find(self, resource, req, sub_resource_lookup):
        """Run an elastic search for *resource* and return parsed hits.

        The query is assembled from, in order: a raw ``source`` request
        arg (wrapped in a 'filtered' query if needed), a ``q`` query
        string, sorting, pagination, and the combined filters from
        resource config, filter callback, sub resource lookup and the
        ``filter`` request arg.
        """
        args = getattr(req, 'args', request.args if request else {})
        source_config = config.SOURCES[resource]
        if args.get('source'):
            # Caller supplied a full elastic query; ensure it is wrapped
            # in a 'filtered' query so filters can be merged in below.
            query = json.loads(args.get('source'))
            if 'filtered' not in query.get('query', {}):
                _query = query.get('query')
                query['query'] = {'filtered': {}}
                if _query:
                    query['query']['filtered']['query'] = _query
        else:
            query = {'query': {'filtered': {}}}
            if args.get('q', None):
                # NOTE(review): _build_query_string is not defined anywhere
                # in this module view - confirm the helper exists at runtime.
                query['query']['filtered']['query'] = _build_query_string(args.get('q'),
                                                                          default_field=args.get('df', '_all'))
        if 'sort' not in query:
            if req.sort:
                sort = ast.literal_eval(req.sort)
                set_sort(query, sort)
            elif self._default_sort(resource) and 'query' not in query['query']['filtered']:
                set_sort(query, self._default_sort(resource))
        if req.max_results:
            query.setdefault('size', req.max_results)
        if req.page > 1:
            query.setdefault('from', (req.page - 1) * req.max_results)
        # Collect optional filters; None entries are dropped by set_filters.
        filters = []
        filters.append(source_config.get('elastic_filter'))
        filters.append(source_config.get('elastic_filter_callback', noop)())
        filters.append({'term': sub_resource_lookup} if sub_resource_lookup else None)
        filters.append(json.loads(args.get('filter')) if 'filter' in args else None)
        set_filters(query, filters)
        if 'facets' in source_config:
            query['facets'] = source_config['facets']
        if 'aggregations' in source_config:
            query['aggs'] = source_config['aggregations']
        args = self._es_args(resource)
        hits = self.es.search(body=query, **args)
        return self._parse_hits(hits, resource)
def find_one(self, resource, req, **lookup):
def is_found(hit):
if 'exists' in hit:
hit['found'] = hit['exists']
return hit.get('found', False)
args = self._es_args(resource)
if config.ID_FIELD in lookup:
try:
hit = self.es.get(id=lookup[config.ID_FIELD], **args) |
rstens/gwells | gwells/models/BCGS_Numbers.py | Python | apache-2.0 | 996 | 0.003012 | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .AuditModel import AuditModel
from django.db import models
import uuid
class BCGS_Numbers(AuditModel):
    """Lookup table of BCGS (BC Geographic System) mapsheet numbers."""

    # NOTE(review): a uuid4 default on a BigIntegerField looks wrong -
    # uuid.uuid4() is not an integer; confirm the intended default upstream.
    bcgs_id = models.BigIntegerField(primary_key=True, default=uuid.uuid4, editable=False)
    bcgs_number = models.CharField(max_length=20, verbose_name="BCGS Mapsheet Number")

    class Meta:
        db_table = 'bcgs_number'
        ordering = ['bcgs_number']

    def __str__(self):
        # The model has no 'description' field, so the previous
        # 'return self.description' raised AttributeError; use the
        # mapsheet number as the string representation instead.
        return self.bcgs_number
|
dmnfarrell/peat | PEATDB/plugins/biodatabaseplugin.py | Python | mit | 4,663 | 0.01587 | #!/usr/bin/env python
#
# Protein Engineering Analysis Tool DataBase (PEATDB)
# Copyright (C) 2010 Damien Farrell & Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# |
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
from Plugins import Plugi | n
from Tkinter import *
import Pmw
from GUI_helper import *
from Bio import Entrez
class BioDBQuery(Plugin, GUI_help):
    """Bio database query plugin for PEAT.

    Presents a small Tkinter/Pmw pane with a query entry, a database
    selector and a scrolled results area; searches NCBI via Biopython's
    Entrez utilities.
    """
    # Plugin registration metadata consumed by the PEAT plugin framework.
    capabilities = ['gui','uses_sidepane']
    requires = ['biopython']
    menuentry = 'Bio DB Query'
    gui_methods = {'quit':'Quit'}
    about = 'This plugin is a template'

    def main(self, parent):
        """Plugin entry point; builds the GUI inside *parent* (PEAT app)."""
        if parent==None:
            # No GUI host available - nothing to do.
            return
        self.parent = parent
        self.DB = parent.DB
        self.parentframe = None
        self._doFrame()
        return

    def _doFrame(self):
        """Build the plugin frame: buttons, query entry, DB menu, results."""
        self.ID='Bio DB Query Plugin'
        if 'uses_sidepane' in self.capabilities:
            # Embed in the host application's side pane.
            self.mainwin = self.parent.createChildFrame()
        else:
            self.mainwin=Toplevel()
            self.mainwin.title(self.ID)
            self.mainwin.geometry('800x600+200+100')
        methods = self._getmethods()
        # Only expose methods declared in gui_methods as buttons.
        methods = [m for m in methods if m[0] in self.gui_methods]
        self._createButtons(methods)
        self.queryvar=StringVar()
        Entry(self.mainwin,textvariable=self.queryvar, width=20,bg='white').pack(side=TOP,fill=BOTH,pady=4)
        b=Button(self.mainwin,text='search',command=self.doSearch)
        b.pack(side=TOP,fill=BOTH,pady=2)
        self.dbvar=StringVar()
        self.dbvar.set('pubmed')
        opts = Pmw.OptionMenu(self.mainwin,
                labelpos = 'w',
                label_text = 'Database:',
                menubutton_textvariable = self.dbvar,
                items = ['pubmed', 'ncbisearch', 'protein', 'nucleotide',
                         'structure', 'genome', 'books',
                         'gene', 'genomeprj'],
                menubutton_width = 10)
        opts.pack(side=TOP,fill=BOTH,pady=2)
        self.results = Pmw.ScrolledText(self.mainwin,
                 labelpos = 'n',
                 label_text='Search results',
                 usehullsize = 1,
                 hull_width = 400,
                 hull_height = 500,
                 text_wrap='word')
        self.results.pack(side=TOP,fill=BOTH,padx=4,pady=4)
        return

    def _createButtons(self, methods):
        """Dynamically create buttons for supplied methods, which is a tuple
        of (method name, label)"""
        for m in methods:
            b=Button(self.mainwin,text=self.gui_methods[m[0]],command=m[1])
            b.pack(side=BOTTOM,fill=BOTH)
        return

    def doSearch(self, query=None):
        """Query the selected Entrez database and show summary fields.

        Uses the text entry value when *query* is None; writes one
        separator-delimited record per result id into the results pane.
        """
        import types
        # Fields shown for most databases; 'structure' has its own set.
        fields = ['Title','AuthorList','DOI']
        structfields = ['PdbDescr', 'ExpMethod', 'EC',
                        'OrganismList', 'ProteinChainCount', 'PdbDepositDate',
                        'Resolution', 'Id']
        if query == None:
            query = self.queryvar.get()
        db = self.dbvar.get()
        if db == 'structure': fields = structfields
        self.results.delete(1.0,END)
        Entrez.email = "peat_support@ucd.ie"
        handle = Entrez.esearch(db=db,term=query)
        record = Entrez.read(handle)
        idlist = record["IdList"]
        for i in idlist:
            # One esummary round-trip per result id.
            handle = Entrez.esummary(db=db, id=i)
            rec = Entrez.read(handle)
            for f in fields:
                try:
                    if type(rec[0][f]) is Entrez.Parser.ListElement:
                        # List-valued fields (e.g. authors) joined inline.
                        for a in rec[0][f]:
                            self.results.insert(END,a+', ')
                    else:
                        self.results.insert(END,rec[0][f]+'\n')
                except:
                    # Missing/odd fields are simply skipped.
                    pass
            self.results.insert(END,'\n'+'------------------------'+'\n')
        return

    def quit(self):
        """Close the plugin window."""
        self.mainwin.destroy()
        return
|
firmadyne/scraper | firmware/spiders/foscam.py | Python | mit | 1,854 | 0.005394 | from scrapy import Spider
from scrapy.http import Request
from firmware.items import FirmwareImage
from firmware.loader import FirmwareLoader
class FoscamSpider(Spider):
    """Spider that scrapes firmware images from Foscam's download center."""
    name = "foscam"
    allowed_domains = ["foscam.com"]
    start_urls = [
        "http://www.foscam.com/download-center/firmware-downloads.html"]

    def start_requests(self):
        # The download page requires a (dummy) login cookie to render.
        for url in self.start_urls:
            yield Request(url, cookies={'loginEmail': "@.com"}, dont_filter=True)

    def parse(self, response):
        """Yield one FirmwareImage item per (product, version, url) entry.

        The page lays entries out in two spans: in span[1] each entry is 7
        <p> elements wide with CRLF-separated product names; in span[2]
        each entry is 5 wide with comma-separated product names.
        """
        for i in range(0, len(response.xpath("//div[@id='main_right']/span[1]/p")), 7):
            prods = response.xpath("//div[@id='main_right']/span[1]//p[%d]/text()" % (i + 2)).extract()[0].split("\r\n")
            # The old code copied prods via a redundant [x for x in prods].
            for product in prods:
                item = FirmwareLoader(item=FirmwareImage(), response=response)
                item.add_xpath("version", "//div[@id='main_right']/span[1]//p[%d]/text()" % (i + 3))
                item.add_xpath("url", "//div[@id='main_right']/span[1]//p[%d]/a/@href" % (i + 7))
                item.add_value("product", product)
                item.add_value("vendor", self.name)
                yield item.load_item()
        for i in range(0, len(response.xpath("//div[@id='main_right']/span[2]/p")), 5):
            prods = response.xpath("//div[@id='main_right']/span[2]//p[%d]/text()" % (i + 2)).extract()[0].split(",")
            for product in prods:
                item = FirmwareLoader(item=FirmwareImage(), response=response)
                item.add_xpath("version", "//div[@id='main_right']/span[2]//p[%d]/text()" % (i + 3))
                item.add_xpath("url", "//div[@id='main_right']/span[2]//p[%d]/a/@href" % (i + 5))
                item.add_value("product", product)
                item.add_value("vendor", self.name)
                yield item.load_item()
|
dmlc/tvm | python/tvm/topi/generic/image.py | Python | apache-2.0 | 1,606 | 0 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Generic ima | ge operators"""
from .default import default_schedule as _default_schedule
def schedule_dilation2d_nchw(outs):
    """Create a generic schedule for dilation2d (NCHW layout).

    Parameters
    ----------
    outs : Array of Tensor
        The computation graph description of dilation2d
        in the format of an array of tensors.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    # Delegate to the generic fallback scheduler (no auto-inline).
    schedule = _default_schedule(outs, False)
    return schedule
def schedule_dilation2d_nhwc(outs):
    """Create a generic schedule for dilation2d (NHWC layout).

    Parameters
    ----------
    outs : Array of Tensor
        The computation graph description of dilation2d
        in the format of an array of tensors.

    Returns
    -------
    sch : Schedule
        The computation schedule for the op.
    """
    # Delegate to the generic fallback scheduler (no auto-inline).
    schedule = _default_schedule(outs, False)
    return schedule
|
arth-co/shoop | shoop_tests/admin/test_product_variation.py | Python | agpl-3.0 | 2,122 | 0.002356 | # -*- coding: utf-8 -*-
from django.forms import formset_factory
import pytest
from shoop.admin.modules.products.views.variation.simple_variation_forms import SimpleVariationChildForm, SimpleVariationChildFormSet
from shoop.admin.modules.products.views.variation.variable_variation_forms import VariableVariationChildrenForm
from shoop.core.models.product_variation import ProductVariationVariable, ProductVariationVariableValue
from shoop.testing.factories import create_product
from shoop_tests.utils import printable_gibberish
from shoop_tests.utils.forms import get_form_data
@pytest.mark.django_db
def test_simple_children_formset():
    """Link and then unlink a simple-variation child via the admin formset."""
    FormSet = formset_factory(SimpleVariationChildForm, SimpleVariationChildFormSet, extra=5, can_delete=True)
    parent = create_product(printable_gibberish())
    child = create_product(printable_gibberish())
    # No links yet
    formset = FormSet(parent_product=parent)
    assert formset.initial_form_count() == 0  # No children yet
    # Save a link
    data = dict(get_form_data(formset, True), **{"form-0-child": child.pk})
    formset = FormSet(parent_product=parent, data=data)
    formset.save()
    assert parent.variation_children.filter(pk=child.pk).exists()  # Got link'd!
    # Remove the link
    formset = FormSet(parent_product=parent)
    assert formset.initial_form_count() == 1  # Got the child here
    data = dict(get_form_data(formset, True), **{"form-0-DELETE": "1"})
    formset = FormSet(parent_product=parent, data=data)
    formset.save()
    assert not parent.variation_children.exists()  # Got unlinked
@pytest.mark.django_db
def test_variable_variation_form():
    """Link a 4x3 grid of variable-variation children and check field count."""
    variable_a = printable_gibberish()
    variable_b = printable_gibberish()
    parent = create_product(printable_gibberish())
    combinations = [(a, b) for a in range(4) for b in range(3)]
    for a, b in combinations:
        child = create_product(printable_gibberish())
        child.link_to_parent(parent, variables={variable_a: a, variable_b: b})
    assert parent.variation_children.count() == len(combinations)
    form = VariableVariationChildrenForm(parent_product=parent)
    assert len(form.fields) == 4 * 3
    # TODO: Improve this test?
Loodoor/UrWorld-Alpha-3.x | server/gentest.py | Python | gpl-2.0 | 6,381 | 0.004861 | #C:\Python34\python.exe
# -*- coding: utf-8 -*-
import random
from structs import *
# Binary terrain flags used during the first generation pass.
BLOCK, VOID = True, False

# Biome palettes: per-layer lists of block codes. A random entry is drawn
# for each placed block, so repeating a code raises its probability
# (see noman_land, where '0' dominates the grass layer).
foret = {
    'stone': ['s'],
    'snow': ['I'],
    'grass': ['h'],
    'dirt': ['U']
}

desert = {
    'stone': ['s'],
    'snow': ['d'],
    'grass': ['d'],
    'dirt': ['d']
}

noman_land = {
    'stone': ['s'],
    'snow': ['I'],
    'grass': ['az', 'ze'] + ['0'] * 48,
    'dirt': ['U']
}

tundra = {
    'stone': ['s'],
    'snow': ['I'],
    'grass': ['I'],
    'dirt': ['I']
}

mineral_world = {
    'stone': ['s'] * 15 + ['a', 'r', 'u', 'y', 'i'],
    'snow': ['s'],
    'grass': ['h'],
    'dirt': ['U']
}
class Map(list):
    """Randomly generated 2D world map (a list of rows of block objects).

    Generation proceeds in four passes: a binary heightmap, biome
    selection, block assignment per biome/depth, then structure placement.
    """

    def __init__(self, length, flatness, height=range(1, 16), headstart=8, deniv=1, structs=Structure.structures):
        """Generate a map of *length* columns.

        :param length: number of columns
        :param flatness: higher values produce flatter terrain
        :param height: allowed range of column heights
        :param headstart: height of the first column
        :param deniv: maximum height delta between neighbouring columns
        :param structs: structures eligible for placement
        """
        self.structs = structs
        self.current_biome_size = 0
        self.current_biome = 0
        self.liste_biomes = [
            foret,
            desert,
            noman_land,
            tundra,
            mineral_world
        ]
        #---------------- Binary terrain generation ----------------#
        array = [[VOID for iy in range(height.stop)] for ix in range(length)]
        mem = length * [0]
        mem[0] = headstart
        r = list(range(-deniv, deniv+1))
        turns = 0
        for x in range(1, length):
            # 'same' counts trailing equal heights; untouched entries are 0,
            # so it is >= 1 here, keeping flatness//same well defined.
            same = 0
            for col in mem[:x-1:-1]:
                if col == mem[x]:
                    same += 1
                else:
                    break
            # The flatter the recent terrain, the likelier a height change.
            new = (not random.randint(0, flatness//same)) * random.choice(r)
            mem[x] = mem[x-1] + new
            # Clamp the height back into the allowed range.
            while mem[x] not in height:
                if mem[x] < height.start:
                    mem[x] += 1
                else:
                    mem[x] -= 1
            # After a slope, bias subsequent picks toward continuing it
            # (or staying flat) for 'flatness' turns.
            if new < 0:
                r = list(range(-deniv, 0)) + [0] * flatness
                turns = flatness
            elif new > 0:
                r = list(range(1, deniv+1)) + [0] * flatness
                turns = flatness
            turns -= 1
            if turns == 0:
                r = list(range(-deniv, deniv+1))
        for x, h in enumerate(mem):
            array[x] = [BLOCK] * h + [VOID] * (len(array[x]) - h)
        # Transpose/flip the column-major height data into row-major order.
        width = len(array)
        height = len(array[0])
        array = [[array[width-1 - x][y] for x in range(width)][::-1] for y in range(height)][::-1]
        #---------------------- Biomes choice ----------------------#
        biomes = [random.choice(self.liste_biomes) for _ in range(32)]
        super().__init__(array)
        #---------------------- Block setting ----------------------#
        new_array = self[:]
        # Candidate minerals for a given depth, falling back to STONE.
        what_can_be_in = lambda depth: [(block if (random.randint(0, 100) in range(block.prob)) and (len(new_array)-depth in block.strat) else STONE) for block in Mineral.blocks]
        # Number of filled cells above (and including) (x, y).
        distance_from_surface = lambda x, y: len([line[x] for line in self[y::-1] if line[x]])
        for y, line in enumerate(new_array):
            for x, block in enumerate(line):
                if block:
                    # Pick the block type by depth band and current biome.
                    if distance_from_surface(x, y) > random.randint(3, 5):
                        new_block = random.choice(biomes[self.current_biome]['stone'])
                    elif 0 <= y <= 6 and distance_from_surface(x, y) == 1:
                        new_block = random.choice(biomes[self.current_biome]['snow'])
                    elif distance_from_surface(x, y) == 1:
                        # Surface: mostly biome grass, rarely decorations.
                        new_block = random.choice(
                            ['Q', 'S', '/§', PASSANT, PASSANT2, PASSANT3, PASSANT4, PASSANT5, PASSANT6,
                             PASSANT7, PASSANT8] + [random.choice(biomes[self.current_biome]['grass'])] * 75)
                    else:
                        new_block = random.choice(biomes[self.current_biome]['dirt'])
                    if y == 19:
                        # Bottom bedrock layer is indestructible.
                        new_block = INDES
                    # add the selected block
                    new_array[y][x] = new_block
                else:
                    new_array[y][x] = AIR
                # advance one block within the current biome (128 wide)
                self.current_biome_size += 1
                if self.current_biome_size >= 128:
                    self.current_biome_size = 0
                    self.current_biome = self.current_biome + 1 if self.current_biome + 1 <= len(biomes) - 1 else 0
        # Second pass: replace plain stone with depth-appropriate minerals.
        for y, line in enumerate(new_array):
            for x, block in enumerate(line):
                if block is STONE:
                    new_array[y][x] = random.choice(what_can_be_in(y))
        super().__init__(new_array)
        #-------------------- Structure setting --------------------#
        new_array = self[:]
        for y, line in enumerate(self):
            for x, block in enumerate(line):
                for s in self.structs:
                    if s.cond(self, (x, y)):
                        self.add_structure(s, (x, y))

    def add_structure(self, struct, pos):
        """Stamp *struct* onto the map at *pos*; out-of-range cells skipped."""
        for block_pos, block in struct.substitute(*pos):
            x, y = block_pos
            if x >= 0 and y >= 0:
                try:
                    self[x, y] = block
                except IndexError:
                    pass

    def __getitem__(self, item):
        # (x, y) tuple indexing returns the cell; out-of-range yields None.
        if isinstance(item, (tuple, list)):
            x, y = item
            try:
                return self[y][x]
            except IndexError:
                pass
        else:
            return super().__getitem__(item)

    def __setitem__(self, item, value):
        # (x, y) tuple indexing sets the cell; out-of-range is a no-op.
        if isinstance(item, (tuple, list)):
            x, y = item
            try:
                self[y][x] = value
            except IndexError:
                pass
        else:
            super().__setitem__(item, value)

    def __add__(self, other):
        """Concatenate another map horizontally, row by row."""
        new = self[:]
        for y, line in enumerate(other):
            new[y] += line
        return new
def print_array(array):
    """Print a 2D array of blocks using each element's ``rep`` character."""
    rendered = ''.join(
        ''.join(elt.rep for elt in line) + '\n'
        for line in array
    )
    print(rendered)
if __name__ == '__main__':
    import sys
    # CLI: gentest.py LENGTH FLATNESS HEIGHT [HEADSTART [DENIV]]
    length, flatness, height = map(int, sys.argv[1:4])
    headstart, deniv = height//2, 1
    try:
        headstart = int(sys.argv[4])
        deniv = int(sys.argv[5])
    except IndexError:
        # Optional arguments omitted - keep the defaults.
        pass
    noise = Map(length, flatness, range(1, height), headstart, deniv)
    print_array(noise)
mpeuster/son-emu | src/emuvim/dcemulator/son_emu_simple_switch_13.py | Python | apache-2.0 | 5,519 | 0.000181 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.topology.event import EventSwitchEnter, EventSwitchReconnected
class SimpleSwitch13(app_manager.RyuApp):
    """L2 learning switch for OpenFlow 1.3 (son-emu variant).

    Installs a table-miss entry on feature negotiation, a NORMAL-action
    default flow on switch join, and learns MAC-to-port mappings from
    packet-in events.
    """
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(SimpleSwitch13, self).__init__(*args, **kwargs)
        # dpid -> {mac -> port}
        self.mac_to_port = {}

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """Install the table-miss flow entry when a switch reports features."""
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        # install table-miss flow entry
        #
        # We specify NO BUFFER to max_len of the output action due to
        # OVS bug. At this moment, if we specify a lesser number, e.g.,
        # 128, OVS will send Packet-In with invalid buffer_id and
        # truncated packet data. In that case, we cannot output packets
        # correctly. The bug has been fixed in OVS v2.1.0.
        match = parser.OFPMatch()
        # FIX: OFPActionOutput takes the output *port* first; the previous
        # code passed only OFPCML_NO_BUFFER, which was then interpreted as
        # the port number, so table-miss packets never reached the
        # controller.
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, match, actions)

    def add_flow(self, datapath, priority, match,
                 actions, buffer_id=None, table_id=0):
        """Install a flow entry with APPLY_ACTIONS instructions."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        if buffer_id:
            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                    priority=priority, match=match,
                                    instructions=inst, table_id=table_id)
        else:
            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                    match=match, instructions=inst, table_id=table_id)
        datapath.send_msg(mod)

    # new switch detected
    @set_ev_cls([EventSwitchEnter, EventSwitchReconnected])
    def _ev_switch_enter_handler(self, ev):
        """On switch join/reconnect, install a NORMAL default flow."""
        datapath = ev.switch.dp
        self.logger.info('registered OF switch id: %s' % datapath.id)
        ofproto = datapath.ofproto
        self.logger.info('OF version: {0}'.format(ofproto))
        # send NORMAL action for all undefined flows
        ofp_parser = datapath.ofproto_parser
        actions = [ofp_parser.OFPActionOutput(ofproto_v1_3.OFPP_NORMAL)]
        self.add_flow(datapath, 0, None, actions, table_id=0)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Learn the source MAC, then forward (and install a flow if known)."""
        # If you hit this you might want to increase
        # the "miss_send_length" of your switch
        if ev.msg.msg_len < ev.msg.total_len:
            self.logger.debug("packet truncated: only %s of %s bytes",
                              ev.msg.msg_len, ev.msg.total_len)
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']

        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]

        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        dst = eth.dst
        src = eth.src

        dpid = datapath.id
        self.mac_to_port.setdefault(dpid, {})

        self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)

        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = in_port

        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD

        actions = [parser.OFPActionOutput(out_port)]

        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
            # verify if we have a valid buffer_id, if yes avoid to send both
            # flow_mod & packet_out
            if msg.buffer_id != ofproto.OFP_NO_BUFFER:
                self.add_flow(datapath, 1, match, actions, msg.buffer_id)
                return
            else:
                self.add_flow(datapath, 1, match, actions)
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data

        out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        datapath.send_msg(out)
|
sofianehaddad/ot-svn | python/test/t_PiecewiseHermiteEvaluationImplementation_std.py | Python | mit | 726 | 0 | #! /usr/bin/env python
from openturns import *
ref = NumericalMathFunction("x", "sin(x)")
size = 12
locations | = NumericalPoint(size)
values = NumericalPoint(size)
derivatives = NumericalPoint(size)
# Build locations/values/derivatives with non-increasing locations
for i in range(size):
locations[i] = 10.0 * i * i / (size - 1.0) / (size - 1.0)
values[i] = ref([locations[i]])[0]
derivatives[i] = ref.gradient([locations[i]])[0, 0]
evaluation = PiecewiseHermiteEvaluationImplementatio | n(
locations, values, derivatives)
print "evaluation=", evaluation
# Check the values
for i in range(2 * size):
x = [-1.0 + 12.0 * i / (2.0 * size - 1.0)]
print "f( %.12g )=" % x[0], evaluation(x), ", ref=", ref(x)
|
ireapps/coding-for-journalists | 2_web_scrape/completed/fun_with_regex_done.py | Python | mit | 3,044 | 0.010184 | # Regular expressions are a powerful tool for pattern matching when you
# know the general format of what you're trying to find but want to keep
# it loose in terms of actual content: think finding email addresses or
# phone numbers based on what they have in common with each other. Python
# has a standard library that deals with it.
import re
#
# Two synthetic invoice-style records used throughout the examples below.
# (Intended to be stepped through interactively; bare expressions print
# nothing when run as a script.)
records = [
    'April 13, 2013 Cyberdyne Systems $4,000.00 18144 El Camino '
    'Real, Sunnyvale, CA 94087 (408) 555-1234 info@cyberdyne.com '
    'December 2, 2018 December 14, 2018',
    'May 4, 2013 Sam Fuzz, Inc. $6,850.50 939 Walnut St, San '
    'Carlos, CA 94070 (408) 555-0304 ceo@samfuzz.net January 28'
    ', 2016 February 15, 2016']

# Find the word 'Sunnyvale' in the first record with re.search()
re.search('Sunnyvale', records[0]).group()

# Find the first date in the first record. Let's pick apart the pattern:
# 1. \w matches upper/lowercase A-Z and digits 0-9, good for text.
# 2. {3,} matches three or more (shortest possible month is May)
# 3. \s matches whitespace, good for spaces and tabs
# 4. {1} matches exactly one
# 5. \d matches 0-9
# 6. {1,2} matches at least one, but no more than 2
# 7. , matches the comma in the date
# 8. \s{1}: again, one space or tab
# 9. \d{4} matches four digits.
re.search('\w{3,}\s{1}\d{1,2},\s{1}\d{4}', records[0]).group()

# Do the same thing but wrap some parentheses around the month, day and year
# patterns and re.search().group(0) to return the whole date.
date_match = re.search('(\w{3,})\s{1}(\d{1,2}),\s{1}(\d{4})', records[0])
date_match.group(0)

# Try 1, 2 and 3 to cycle through month, day and year.
date_match.group(1)
date_match.group(2)
date_match.group(3)

# Grab all the dates in the first record with re.findall().
all_dates = re.findall('\w{3,}\s{1}\d{1,2},\s{1}\d{4}', records[0])

# Print them out with a for loop
for date in all_dates:
    print date

# Pick out and print dollar amounts from the records.
# . matches any character, * matches any number of times
for record in records:
    money_match = re.search('\$.*\.\d{2}', record)
    print money_match.group()

# Try to do the same thing for the phone numbers.
for record in records:
    ph_match = re.search('\(\d{3}\)\s\d{3}-\d{4}', record)
    print ph_match.group()

# How would I isolate something like a company name that's totally variable?
# Think about the hooks you have on either side; the pattern you want to
# match here has to do with what's around it.
for record in records:
    company_match = re.search('\d{4}\s(.+)\s\$', record)
    print company_match.group(1)

# We can also substitute based on a pattern. Give everyone an '.info'
# email address via print and re.sub().
for record in records:
    print re.sub('\.\w{3}', '.info', record)

# If you have multiple character possibilities that act as delimiters for a
# string you want to break apart, re.split() can come in handy.
my_list = ['OCT-2010', 'NOV/2011', 'FEB 2012', 'MAR/2012']
for item in my_list:
    print re.split('-|/|\s', item)
|
andreif/heroku_django | project/views.py | Python | mit | 49 | 0 | fro | m .apps.page import views as page
assert p | age
|
serratoro/timesheet-analyser | TimeSheetAnalyser/presenters/daily_attendance_plot_presenter.py | Python | gpl-3.0 | 3,657 | 0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Time-sheet Analyser: Python library which allows to analyse time-sheets.
# Copyright (C) 2017 Carlos Serra Toro.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURP | OSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, | see <http://www.gnu.org/licenses/>.
import math
import matplotlib.pyplot as plt
from matplotlib.dates import MONDAY
from matplotlib.dates import MonthLocator, WeekdayLocator, DateFormatter
from TimeSheetAnalyser.utils.misc import time_to_float_time, normalise_number,\
average_sequence
def daily_attendance_plot_presenter(time_sheet):
    """Render a TimeSheet as a graphic view of all attendance entries,
    one vertical bar per time-span, grouped by day.

    :param time_sheet: An object of type TimeSheet.
    :return: True
    """
    WEEKDAYS = 5  # window size (in days) for the moving averages
    dates = time_sheet.get_dates(sort=True)

    # First and last attendance time of each day, as fractional hours,
    # while tracking the overall hour range covered by the data.
    day_starts, day_ends = [], []
    lo_hour, hi_hour = 24, 0
    for day in dates:
        first = time_to_float_time(time_sheet[day][0].start_time)
        last = time_to_float_time(time_sheet[day][-1].end_time)
        day_starts.append(first)
        day_ends.append(last)
        lo_hour = min(lo_hour, int(first))
        hi_hour = max(hi_hour, int(math.ceil(last)))
    hours_range = [lo_hour, min(24, hi_hour)]

    fig, axes = plt.subplots()

    # Dots for the first (green) and last (red) attendance of each day.
    axes.plot_date(dates, day_starts, fmt='og', visible=True)
    axes.plot_date(dates, day_ends, fmt='or', visible=True)

    # One vertical segment per time-span, normalised into the hour range.
    for day in dates:
        for span in time_sheet[day]:
            y_lo = normalise_number(time_to_float_time(span.start_time),
                                    input_range=hours_range)
            y_hi = normalise_number(time_to_float_time(span.end_time),
                                    input_range=hours_range)
            axes.axvline(x=day, ymin=y_lo, ymax=y_hi, color='b')

    # Weekly moving averages of the starting and ending times.
    avg_style = {'fmt': 'm-', 'linewidth': 2, 'visible': True}
    axes.plot_date(dates, average_sequence(day_starts, win_size=WEEKDAYS),
                   **avg_style)
    axes.plot_date(dates, average_sequence(day_ends, win_size=WEEKDAYS),
                   **avg_style)

    axes.grid(True)
    axes.set_ylim(hours_range)
    axes.xaxis.set_major_locator(MonthLocator())
    axes.xaxis.set_minor_locator(WeekdayLocator(MONDAY))
    axes.xaxis.set_major_formatter(DateFormatter("%B %Y"))
    fig.autofmt_xdate()

    tick_hours = range(hours_range[0], hours_range[1] + 1)
    plt.yticks(tick_hours, [str(hour) for hour in tick_hours],
               rotation='horizontal')
    plt.show()
    return True
|
jbedorf/tensorflow | tensorflow/python/kernel_tests/ctc_loss_op_test.py | Python | apache-2.0 | 31,093 | 0.004567 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ctc_ops.ctc_decoder_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.f | ramework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import ctc_ops
from tensorflow.python.ops import gradients_ | impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
def SimpleSparseTensorFrom(x):
  """Create a very simple SparseTensor with dimensions (batch, time).

  Args:
    x: a list of lists of type int

  Returns:
    x_ix and x_val, the indices and values of the SparseTensor<2>.
  """
  # Every (batch, time) cell that holds a value becomes one sparse entry.
  indices = [[batch_i, t]
             for batch_i, batch in enumerate(x)
             for t in range(len(batch))]
  values = [val for batch in x for val in batch]
  # Dense shape: batch count x longest row (max time index + 1).
  dense_shape = [len(x), np.asarray(indices).max(0)[1] + 1]
  return sparse_tensor.SparseTensor(
      constant_op.constant(indices, dtypes.int64),
      constant_op.constant(values, dtypes.int32),
      constant_op.constant(dense_shape, dtypes.int64))
def _ctc_loss_v2(labels, inputs, sequence_length,
                 preprocess_collapse_repeated=False,
                 ctc_merge_repeated=True,
                 ignore_longer_outputs_than_inputs=False,
                 time_major=True):
  """Call ctc_loss_v2 with v1 args.

  Thin adapter so tests written against the v1 signature can exercise the
  v2 op. The extra v1 flags are accepted for signature compatibility, but
  only their v1 default values are supported (asserted below).
  """
  assert not preprocess_collapse_repeated
  assert ctc_merge_repeated
  assert not ignore_longer_outputs_than_inputs
  return ctc_ops.ctc_loss_v2(
      labels=labels,
      logits=inputs,
      logit_length=sequence_length,
      label_length=None,  # label lengths are inferred from the SparseTensor
      blank_index=-1,
      logits_time_major=time_major)
class CTCLossTest(test.TestCase):
def _testCTCLoss(self,
inputs,
seq_lens,
labels,
loss_truth,
grad_truth,
expected_err_re=None):
self.assertEquals(len(inputs), len(grad_truth))
inputs_t = constant_op.constant(inputs)
with self.cached_session(use_gpu=False) as sess:
loss = _ctc_loss_v2(
inputs=inputs_t, labels=labels, sequence_length=seq_lens)
grad = gradients_impl.gradients(loss, [inputs_t])[0]
self.assertShapeEqual(loss_truth, loss)
self.assertShapeEqual(grad_truth, grad)
if expected_err_re is None:
(tf_loss, tf_grad) = self.evaluate([loss, grad])
self.assertAllClose(tf_loss, loss_truth, atol=1e-6)
self.assertAllClose(tf_grad, grad_truth, atol=1e-6)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate([loss, grad])
@test_util.run_v1_only("b/120545219")
def testBasic(self):
"""Test two batch entries."""
# Input and ground truth from Alex Graves' implementation.
#
#### Batch entry 0 #####
# targets: 0 1 2 1 0
# outputs:
# 0 0.633766 0.221185 0.0917319 0.0129757 0.0142857 0.0260553
# 1 0.111121 0.588392 0.278779 0.0055756 0.00569609 0.010436
# 2 0.0357786 0.633813 0.321418 0.00249248 0.00272882 0.0037688
# 3 0.0663296 0.643849 0.280111 0.00283995 0.0035545 0.00331533
# 4 0.458235 0.396634 0.123377 0.00648837 0.00903441 0.00623107
# alpha:
# 0 -3.64753 -0.456075 -inf -inf -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -inf -inf -0.986437 -inf -inf -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -inf -inf -2.12145 -inf -inf -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -inf -inf -2.56174 -inf -inf -inf
# 4 -inf -inf -inf -inf -inf -inf -inf -inf -inf -3.34211 -inf
# beta:
# 0 -inf -2.88604 -inf -inf -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -inf -inf -2.35568 -inf -inf -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -inf -inf -1.22066 -inf -inf -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -inf -inf -0.780373 -inf -inf -inf
# 4 -inf -inf -inf -inf -inf -inf -inf -inf -inf 0 0
# prob: -3.34211
# outputDerivs:
# 0 -0.366234 0.221185 0.0917319 0.0129757 0.0142857 0.0260553
# 1 0.111121 -0.411608 0.278779 0.0055756 0.00569609 0.010436
# 2 0.0357786 0.633813 -0.678582 0.00249248 0.00272882 0.0037688
# 3 0.0663296 -0.356151 0.280111 0.00283995 0.0035545 0.00331533
# 4 -0.541765 0.396634 0.123377 0.00648837 0.00903441 0.00623107
#
#### Batch entry 1 #####
#
# targets: 0 1 1 0
# outputs:
# 0 0.30176 0.28562 0.0831517 0.0862751 0.0816851 0.161508
# 1 0.24082 0.397533 0.0557226 0.0546814 0.0557528 0.19549
# 2 0.230246 0.450868 0.0389607 0.038309 0.0391602 0.202456
# 3 0.280884 0.429522 0.0326593 0.0339046 0.0326856 0.190345
# 4 0.423286 0.315517 0.0338439 0.0393744 0.0339315 0.154046
# alpha:
# 0 -1.8232 -1.19812 -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -2.19315 -2.83037 -2.1206 -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -2.03268 -3.71783 -inf -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -4.56292 -inf -inf -inf
# 4 -inf -inf -inf -inf -inf -inf -inf -5.42262 -inf
# beta:
# 0 -inf -4.2245 -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -inf -inf -3.30202 -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -inf -1.70479 -0.856738 -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -0.859706 -0.859706 -0.549337 -inf
# 4 -inf -inf -inf -inf -inf -inf -inf 0 0
# prob: -5.42262
# outputDerivs:
# 0 -0.69824 0.28562 0.0831517 0.0862751 0.0816851 0.161508
# 1 0.24082 -0.602467 0.0557226 0.0546814 0.0557528 0.19549
# 2 0.230246 0.450868 0.0389607 0.038309 0.0391602 -0.797544
# 3 0.280884 -0.570478 0.0326593 0.0339046 0.0326856 0.190345
# 4 -0.576714 0.315517 0.0338439 0.0393744 0.0339315 0.154046
# max_time_steps == 7
depth = 6
# seq_len_0 == 5
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
# dimensions are time x depth
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
input_log_prob_matrix_0 = np.log(input_prob_matrix_0)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
# seq_len_1 == 5
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
# dimensions are time x depth
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.195 |
matthewoliver/swift | test/unit/common/middleware/s3api/helpers.py | Python | apache-2.0 | 6,907 | 0 | # Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This stuff can't live in test/unit/__init__.py due to its swob dependency.
from copy import deepcopy
from hashlib import md5
from swift.common import swob
from swift.common.utils import split_path
from swift.common.request_helpers import is_sys_meta
class FakeSwift(object):
    """
    A good-enough fake Swift proxy server to use in testing middleware.

    Register canned responses with :meth:`register`, run requests through
    the middleware under test with this instance as the backend app, then
    inspect :attr:`calls` / :attr:`calls_with_headers` for what reached
    "Swift".
    """

    def __init__(self, s3_acl=False):
        self._calls = []
        self.req_method_paths = []
        self.swift_sources = []
        # bodies of PUT objects, kept so later GETs can serve them back
        self.uploaded = {}
        # mapping of (method, path) --> (response class, headers, body)
        self._responses = {}
        self.s3_acl = s3_acl

    def _fake_auth_middleware(self, env):
        # Minimal auth stand-in: translate "tenant:user" from the
        # Authorization header into an AUTH_<tenant> account path and
        # install a swift.authorize callback.
        if 'swift.authorize_override' in env:
            return

        if 'HTTP_AUTHORIZATION' not in env:
            return

        _, authorization = env['HTTP_AUTHORIZATION'].split(' ')
        tenant_user, sign = authorization.rsplit(':', 1)
        tenant, user = tenant_user.rsplit(':', 1)

        path = env['PATH_INFO']
        env['PATH_INFO'] = path.replace(tenant_user, 'AUTH_' + tenant)

        env['REMOTE_USER'] = 'authorized'

        if env['REQUEST_METHOD'] == 'TEST':
            # AccessDenied by default at s3acl authenticate
            env['swift.authorize'] = \
                lambda req: swob.HTTPForbidden(request=req)
        else:
            env['swift.authorize'] = lambda req: None

    def __call__(self, env, start_response):
        if self.s3_acl:
            self._fake_auth_middleware(env)

        req = swob.Request(env)
        method = env['REQUEST_METHOD']
        path = env['PATH_INFO']
        _, acc, cont, obj = split_path(env['PATH_INFO'], 0, 4,
                                       rest_with_last=True)
        if env.get('QUERY_STRING'):
            path += '?' + env['QUERY_STRING']

        if 'swift.authorize' in env:
            resp = env['swift.authorize'](req)
            if resp:
                return resp(env, start_response)

        headers = req.headers
        self._calls.append((method, path, headers))
        self.swift_sources.append(env.get('swift.source'))

        try:
            resp_class, raw_headers, body = self._responses[(method, path)]
            headers = swob.HeaderKeyDict(raw_headers)
        except KeyError:
            # Fall back to: a registration without the query string, a GET
            # registered for this HEAD, or a previously PUT object.
            if (env.get('QUERY_STRING')
                    and (method, env['PATH_INFO']) in self._responses):
                resp_class, raw_headers, body = self._responses[
                    (method, env['PATH_INFO'])]
                headers = swob.HeaderKeyDict(raw_headers)
            elif method == 'HEAD' and ('GET', path) in self._responses:
                resp_class, raw_headers, _ = self._responses[('GET', path)]
                body = None
                headers = swob.HeaderKeyDict(raw_headers)
            elif method == 'GET' and obj and path in self.uploaded:
                resp_class = swob.HTTPOk
                headers, body = self.uploaded[path]
            else:
                print("Didn't find %r in allowed responses" %
                      ((method, path),))
                raise

        # simulate object PUT
        if method == 'PUT' and obj:
            # renamed from `input` so the builtin isn't shadowed
            put_body = env['wsgi.input'].read()
            etag = md5(put_body).hexdigest()
            headers.setdefault('Etag', etag)
            headers.setdefault('Content-Length', len(put_body))

            # keep it for subsequent GET requests later
            self.uploaded[path] = (deepcopy(headers), put_body)
            if "CONTENT_TYPE" in env:
                self.uploaded[path][0]['Content-Type'] = env["CONTENT_TYPE"]

        # range requests ought to work, but copies are special
        support_range_and_conditional = not (
            method == 'PUT' and
            'X-Copy-From' in req.headers and
            'Range' in req.headers)
        resp = resp_class(req=req, headers=headers, body=body,
                          conditional_response=support_range_and_conditional)
        return resp(env, start_response)

    @property
    def calls(self):
        # (method, path) pairs with the headers dropped
        return [(method, path) for method, path, headers in self._calls]

    @property
    def calls_with_headers(self):
        return self._calls

    @property
    def call_count(self):
        return len(self._calls)

    def register(self, method, path, response_class, headers, body):
        # assuming the path format like /v1/account/container/object
        resource_map = ['account', 'container', 'object']
        # list() is required on py3 where filter() is lazy and len() below
        # needs a sized sequence (py2 behavior is unchanged)
        acos = list(filter(None, split_path(path, 0, 4, True)[1:]))
        index = len(acos) - 1
        resource = resource_map[index]
        if (method, path) in self._responses:
            old_headers = self._responses[(method, path)][1]
            headers = headers.copy()
            # items() instead of py2-only iteritems() so this runs on py3
            for key, value in old_headers.items():
                if is_sys_meta(resource, key) and key not in headers:
                    # keep old sysmeta for s3acl
                    headers.update({key: value})
        self._responses[(method, path)] = (response_class, headers, body)

    def register_unconditionally(self, method, path, response_class, headers,
                                 body):
        # register() keeps old sysmeta around, but
        # register_unconditionally() keeps nothing.
        self._responses[(method, path)] = (response_class, headers, body)

    def clear_calls(self):
        del self._calls[:]
class UnreadableInput(object):
    """Stand-in for a wsgi.input whose read() always blows up.

    Some clients send neither a Content-Length nor a Transfer-Encoding
    header, which causes (some versions of?) eventlet to raise TypeError
    on reads (see https://bugs.launchpad.net/swift3/+bug/1593870 in
    detail). Used as a context manager around a request, it proves on exit
    that the middleware never attempted to read the body.
    """

    def __init__(self, test_case):
        self.calls = 0
        self.test_case = test_case

    def read(self, *a, **kw):
        # Record the attempt, then fail the same way eventlet would.
        self.calls += 1
        raise TypeError

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # On the way out of the `with`, assert the body was never read.
        self.test_case.assertEqual(0, self.calls)
|
toobaz/pandas | pandas/tests/io/json/test_readlines.py | Python | bsd-3-clause | 5,700 | 0.001053 | from io import StringIO
import pytest
import pandas as pd
from pandas import DataFrame, read_json
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal, ensure_clean
from pandas.io.json._json import JsonReader
@pytest.fixture
def lines_json_df():
    """A three-row frame serialised as JSON Lines (records orient)."""
    frame = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
    return frame.to_json(lines=True, orient="records")
def test_read_jsonl():
    # GH9180
    # Two JSON Lines records (keys in either order) parse to identical rows.
    parsed = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
    assert_frame_equal(parsed, DataFrame([[1, 2], [1, 2]], columns=["a", "b"]))
def test_read_jsonl_unicode_chars():
    # GH15132: non-ascii unicode characters
    # \u201d == RIGHT DOUBLE QUOTATION MARK
    expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])

    # simulate file handle
    json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
    assert_frame_equal(read_json(StringIO(json), lines=True), expected)

    # simulate string
    json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
    assert_frame_equal(read_json(json, lines=True), expected)
def test_to_jsonl():
# GH9180
df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
assert result == expected
df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
assert result == expected
assert_frame_equal(read_json(result, lines=True), df)
# GH15096: escaped characters in columns and data
df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])
result = df.to_json(orient="records", lines=True)
expected = '{"a\\\\":"foo\\\\","b":"bar"}\n' '{"a\\\\":"foo\\"","b":"bar"}'
assert result == expected
assert_frame_equal(read_json(result, lines=True), df)
@pytest.mark.parametrize("chunksize", [1, 1.0])
def test_readjson_chunks(lines_json_df, chunksize):
    # Basic test that read_json(chunks=True) gives the same result as
    # read_json(chunks=False)
    # GH17048: memory usage when lines=True
    whole = read_json(StringIO(lines_json_df), lines=True)
    in_pieces = pd.concat(
        read_json(StringIO(lines_json_df), lines=True, chunksize=chunksize))
    assert_frame_equal(in_pieces, whole)
def test_readjson_chunksize_requires_lines(lines_json_df):
    # chunksize is only meaningful for line-delimited JSON.
    with pytest.raises(ValueError, match="chunksize can only be passed if lines=True"):
        pd.read_json(StringIO(lines_json_df), lines=False, chunksize=2)
def test_readjson_chunks_series():
    # Reading line-format JSON into a Series honours the chunksize param.
    ser = pd.Series({"A": 1, "B": 2})

    whole = pd.read_json(
        StringIO(ser.to_json(lines=True, orient="records")),
        lines=True, typ="Series")

    in_pieces = pd.concat(pd.read_json(
        StringIO(ser.to_json(lines=True, orient="records")),
        lines=True, typ="Series", chunksize=1))

    assert_series_equal(in_pieces, whole)
def test_readjson_each_chunk(lines_json_df):
# Other tests check that the final result of read_json(chunksize=True)
# is correct. This checks the intermediate chunks.
chunks = list(pd.read_json(StringIO(lines_json_df), lines=True, chunksize=2))
assert chunks[0].shape == (2, 2)
assert chunks[1].shape == (1, 2)
def test_readjson_chunks_from_file():
    # Chunked and unchunked reads of the same file must agree.
    with ensure_clean("test.json") as path:
        frame = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        frame.to_json(path, lines=True, orient="records")
        in_pieces = pd.concat(pd.read_json(path, lines=True, chunksize=1))
        whole = pd.read_json(path, lines=True)
        assert_frame_equal(whole, in_pieces)
@pytest.mark.parametrize("chunksize", [None, 1])
def test_readjson_chunks_closes(chunksize):
    # JsonReader must close its underlying stream after read(), whether or
    # not chunked iteration was requested.
    with ensure_clean("test.json") as path:
        df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
        df.to_json(path, lines=True, orient="records")
        # Construct the reader directly (rather than via read_json) so the
        # open stream is reachable for the assertion below.
        reader = JsonReader(
            path,
            orient=None,
            typ="frame",
            dtype=True,
            convert_axes=True,
            convert_dates=True,
            keep_default_dates=True,
            numpy=False,
            precise_float=False,
            date_unit=None,
            encoding=None,
            lines=True,
            chunksize=chunksize,
            compression=None,
        )
        reader.read()
        assert (
            reader.open_stream.closed
        ), "didn't close stream with \
            chunksize = {chunksize}".format(
            chunksize=chunksize
        )
@pytest.mark.parametrize("chunksize", [0, -1, 2.2, "foo"])
def test_readjson_invalid_chunksize(lines_json_df, chunksize):
    # chunksize must be a positive integer; anything else is rejected.
    with pytest.raises(ValueError, match=r"'chunksize' must be an integer >=1"):
        pd.read_json(StringIO(lines_json_df), lines=True, chunksize=chunksize)
@pytest.mark.parametrize("chunksize", [None, 1, 2])
def test_readjson_chunks_multiple_empty_lines(chunksize):
    # Blank lines between records must be skipped in both the chunked and
    # unchunked code paths.
    j = """

    {"A":1,"B":4}



    {"A":2,"B":5}




    {"A":3,"B":6}
    """
    orig = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
    test = pd.read_json(j, lines=True, chunksize=chunksize)
    if chunksize is not None:
        # chunksize returns an iterator of frames; reassemble it.
        test = pd.concat(test)
    tm.assert_frame_equal(
        orig, test, obj="chunksize: {chunksize}".format(chunksize=chunksize)
    )
|
undefinedv/Jingubang | sqlmap/lib/controller/controller.py | Python | gpl-3.0 | 30,449 | 0.003481 | #!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import os
import re
from lib.controller.action import action
from lib.controller.checks import checkSqlInjection
from lib.controller.checks import checkDynParam
from lib.controller.checks import checkStability
from lib.controller.checks import checkString
from lib.controller.checks import checkRegexp
from lib.controller.checks import checkConnection
from lib.controller.checks import checkNullConnection
from lib.controller.checks import checkWaf
from lib.controller.checks import heuristicCheckSqlInjection
from lib.controller.checks import identifyWaf
from lib.core.agent import agent
from lib.core.common import dataToStdout
from lib.core.common import extractRegexResult
from lib.core.common import getFilteredPageContent
from lib.core.common import getPublicTypeMembers
from lib.core.common import getSafeExString
from lib.core.common import hashDBRetrieve
from lib.core.common import hashDBWrite
from lib.core.common import intersect
from lib.core.common import isListLike
from lib.core.common import parseTargetUrl
from lib.core.common import popValue
from lib.core.common import pushValue
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import safeCSValue
from lib.core.common import showHttpErrorCodes
from lib.core.common import urlencode
from lib.core.common import urldecode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import CONTENT_TYPE
from lib.core.enums import HASHDB_KEYS
from lib.core.enums import HEURISTIC_TEST
from lib.core.enums import HTTPMETHOD
from lib.core.enums import NOTE
from lib.core.enums import PAYLOAD
from lib.core.enums import PLACE
from lib.core.exception import SqlmapBaseException
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapNotVulnerableException
from lib.core.exception import SqlmapSilentQuitException
from lib.core.exception import SqlmapValueException
from lib.core.exception import SqlmapUserQuitException
from lib.core.settings import ASP_NET_CONTROL_REGEX
from lib.core.settings import DEFAULT_GET_POST_DELIMITER
from lib.core.settings import EMPTY_FORM_FIELDS_REGEX
from lib.core.settings import IGNORE_PARAMETERS
from lib.core.settings import LOW_TEXT_PERCENT
from lib.core.settings import GOOGLE_ANALYTICS_COOKIE_PREFIX
from lib.core.settings import HOST_ALIASES
from lib.core.settings import REFERER_ALIASES
from lib.core.settings import USER_AGENT_ALIASES
from lib.core.target import initTargetEnv
from lib.core.target import setupTargetEnv
from thirdparty.pagerank.pagerank import get_pagerank
def _selectInjection():
    """
    Selection function for injection place, parameters and type.

    Merges injections sharing the same (place, parameter, ptype) triple,
    then either picks the single point automatically or asks the user to
    choose one; the result is stored in kb.injection.
    """
    # Merge injections targeting the same point so each is offered once.
    points = {}

    for injection in kb.injections:
        place = injection.place
        parameter = injection.parameter
        ptype = injection.ptype

        point = (place, parameter, ptype)

        if point not in points:
            points[point] = injection
        else:
            # Same point seen again: keep existing non-empty attributes,
            # fill gaps from the new injection, and merge technique data.
            for key in points[point].keys():
                if key != 'data':
                    points[point][key] = points[point][key] or injection[key]
            points[point]['data'].update(injection['data'])

    if len(points) == 1:
        kb.injection = kb.injections[0]
    elif len(points) > 1:
        message = "there were multiple injection points, please select "
        message += "the one to use for following injections:\n"

        # NOTE: `points` is reused here as a list of already-listed point
        # tuples (dedup while building the menu) — distinct from the dict
        # of the same name above.
        points = []

        for i in xrange(0, len(kb.injections)):
            place = kb.injections[i].place
            parameter = kb.injections[i].parameter
            ptype = kb.injections[i].ptype
            point = (place, parameter, ptype)

            if point not in points:
                points.append(point)
                ptype = PAYLOAD.PARAMETER[ptype] if isinstance(ptype, int) else ptype
                message += "[%d] place: %s, parameter: " % (i, place)
                message += "%s, type: %s" % (parameter, ptype)

                if i == 0:
                    message += " (default)"

                message += "\n"

        message += "[q] Quit"
        select = readInput(message, default="0")

        # Accept only a digit that indexes into kb.injections; 'q'/'Q'
        # aborts, anything else is rejected.
        if select.isdigit() and int(select) < len(kb.injections) and int(select) >= 0:
            index = int(select)
        elif select[0] in ("Q", "q"):
            raise SqlmapUserQuitException
        else:
            errMsg = "invalid choice"
            raise SqlmapValueException(errMsg)

        kb.injection = kb.injections[index]
def _formatInjection(inj):
    """Render one injection point (its place, parameter and every working
    technique) as the human-readable text sqlmap prints."""
    # Prefer the explicit HTTP method unless it is the plain GET/POST case.
    paramType = inj.place if conf.method in (None, HTTPMETHOD.GET, HTTPMETHOD.POST) else conf.method
    data = "Parameter: %s (%s)\n" % (inj.parameter, paramType)

    for stype, sdata in inj.data.items():
        title, vector, comment = sdata.title, sdata.vector, sdata.comment
        payload = agent.adjustLateValues(sdata.payload)

        if inj.place == PLACE.CUSTOM_HEADER:
            # drop the "Header-Name," prefix; keep only the payload itself
            payload = payload.split(',', 1)[1]

        if stype == PAYLOAD.TECHNIQUE.UNION:
            # Recompute the real column count and patch it into the title.
            count = re.sub(r"(?i)(\(.+\))|(\blimit[^A-Za-z]+)", "", sdata.payload).count(',') + 1
            title = re.sub(r"\d+ to \d+", str(count), title)
            vector = agent.forgeUnionQuery("[QUERY]", vector[0], vector[1], vector[2], None, None, vector[5], vector[6])
            if count == 1:
                title = title.replace("columns", "column")
        elif comment:
            vector = "%s%s" % (vector, comment)

        data += " Type: %s\n" % PAYLOAD.SQLINJECTION[stype]
        data += " Title: %s\n" % title
        data += " Payload: %s\n" % urldecode(payload, unsafe="&", plusspace=(inj.place != PLACE.GET and kb.postSpaceToPlus))
        data += " Vector: %s\n\n" % vector if conf.verbose > 1 else "\n"

    return data
def _showInjections():
    """Print the summary of all confirmed injection points, either through
    the API dumper or as formatted console output."""
    if kb.testQueryCount > 0:
        header = "sqlmap identified the following injection point(s) with "
        header += "a total of %d HTTP(s) requests" % kb.testQueryCount
    else:
        header = "sqlmap resumed the following injection point(s) from stored session"

    if hasattr(conf, "api"):
        conf.dumper.string("", kb.injections, content_type=CONTENT_TYPE.TECHNIQUES)
    else:
        # Deduplicate formatted entries while preserving discovery order.
        # The previous `"".join(set(...))` printed the injection points in
        # arbitrary (hash) order, making the output nondeterministic.
        seen = set()
        entries = []
        for injection in kb.injections:
            formatted = _formatInjection(injection)
            if formatted not in seen:
                seen.add(formatted)
                entries.append(formatted)
        data = "".join(entries).rstrip("\n")
        conf.dumper.string(header, data)

    if conf.tamper:
        warnMsg = "changes made by tampering scripts are not "
        warnMsg += "included in shown payload content(s)"
        logger.warn(warnMsg)

    if conf.hpp:
        warnMsg = "changes made by HTTP parameter pollution are not "
        warnMsg += "included in shown payload content(s)"
        logger.warn(warnMsg)
def _randomFillBlankFields(value):
    """Offer to fill empty form fields inside *value* with random strings,
    returning the (possibly) updated value."""
    retVal = value

    if extractRegexResult(EMPTY_FORM_FIELDS_REGEX, value):
        message = "do you want to fill blank fields with random values? [Y/n] "
        choice = readInput(message, default="Y")

        # Empty answer defaults to yes.
        if not choice or choice[0] in ("y", "Y"):
            for match in re.finditer(EMPTY_FORM_FIELDS_REGEX, retVal):
                item = match.group("result")

                # Skip well-known parameters and ASP.NET control fields.
                if any(_ in item for _ in IGNORE_PARAMETERS) or re.search(ASP_NET_CONTROL_REGEX, item):
                    continue

                if item[-1] == DEFAULT_GET_POST_DELIMITER:
                    # keep the trailing delimiter in place
                    filled = "%s%s%s" % (item[:-1], randomStr(), DEFAULT_GET_POST_DELIMITER)
                else:
                    filled = "%s%s" % (item, randomStr())
                retVal = retVal.replace(item, filled)

    return retVal
def _saveToHash | DB():
injections = hashDBRetrieve(HASHDB_KEYS.KB_INJECTIONS, True)
if not isListLike(injections):
injections = []
injections.extend(_ for _ in kb.injections if _ and _.place is not None and _.parameter is not None)
_ = dict()
for injection in injections:
key = (injection.place, injection.parameter, injection.ptype)
if key not in _:
_[key] = injection
else:
_[key].data.update(injection.data)
|
akuks/pretix | src/pretix/control/views/orders.py | Python | apache-2.0 | 13,510 | 0.001776 | from datetime import timedelta
from itertools import groupby
from django import forms
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.utils.functional import cached_property
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from django.views.generic import DetailView, ListView, TemplateView, View
from pretix.base.models import CachedFile, CachedTicket, Item, Order, Quota
from pretix.base.services.export import export
from pretix.base.services.orders import mark_order_paid
from pretix.base.services.stats import order_overview
from pretix.base.services.tickets import generate
from pretix.base.signals import (
register_data_exporters, register_payment_providers,
register_ticket_outputs,
)
from pretix.control.forms.orders import ExtendForm
from pretix.control.permissions import EventPermissionRequiredMixin
class OrderList(EventPermissionRequiredMixin, ListView):
    """Paginated list of the current event's orders, filterable via GET
    parameters by user (email/name substring), status and purchased item."""
    model = Order
    context_object_name = 'orders'
    template_name = 'pretixcontrol/orders/index.html'
    paginate_by = 30
    permission = 'can_view_orders'

    def get_queryset(self):
        qs = Order.objects.current.filter(event=self.request.event)

        user_query = self.request.GET.get("user", "")
        if user_query != "":
            # Substring match against email or either name component.
            qs = qs.filter(
                Q(user__email__icontains=user_query)
                | Q(user__givenname__icontains=user_query)
                | Q(user__familyname__icontains=user_query)
            )

        status = self.request.GET.get("status", "")
        if status != "":
            qs = qs.filter(status=status)

        item = self.request.GET.get("item", "")
        if item != "":
            # distinct() because an order can hold the item multiple times.
            qs = qs.filter(positions__item_id__in=(item,)).distinct()

        return qs.select_related("user")

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['items'] = Item.objects.current.filter(event=self.request.event)
        return ctx
class OrderView(EventPermissionRequiredMixin, DetailView):
    """Base detail view that resolves an order by its code within the
    current event and exposes common helpers to subclasses."""
    context_object_name = 'order'
    model = Order

    def get_object(self, queryset=None):
        # Order codes are stored upper-case; normalise the URL fragment.
        return Order.objects.current.get(
            event=self.request.event,
            code=self.kwargs['code'].upper()
        )

    @cached_property
    def order(self):
        return self.get_object()

    @cached_property
    def payment_provider(self):
        # Ask every registered payment provider plugin for the one that
        # handled this order; None if that plugin is no longer installed.
        for receiver, response in register_payment_providers.send(self.request.event):
            provider = response(self.request.event)
            if provider.identifier == self.order.payment_provider:
                return provider

    def get_order_url(self):
        return reverse('control:event.order', kwargs={
            'event': self.request.event.slug,
            'organizer': self.request.event.organizer.slug,
            'code': self.order.code
        })
class OrderDetail(OrderView):
    """Read-only detail page for one order: grouped positions, ticket
    download buttons and the payment provider's control-panel widget."""
    template_name = 'pretixcontrol/order/index.html'
    permission = 'can_view_orders'

    @cached_property
    def download_buttons(self):
        # One button per *enabled* ticket output provider.
        buttons = []

        responses = register_ticket_outputs.send(self.request.event)
        for receiver, response in responses:
            provider = response(self.request.event)
            if not provider.is_enabled:
                continue
            buttons.append({
                'icon': provider.download_button_icon or 'fa-download',
                # NOTE(review): the button *text* also falls back to
                # 'fa-download', which is an icon class name — looks like a
                # copy-paste slip; confirm the intended default label.
                'text': provider.download_button_text or 'fa-download',
                'identifier': provider.identifier,
            })
        return buttons

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['items'] = self.get_items()
        ctx['event'] = self.request.event
        ctx['download_buttons'] = self.download_buttons
        # Ticket downloads are only offered once the order is paid.
        ctx['can_download'] = (
            self.request.event.settings.ticket_download
            and self.order.status == Order.STATUS_PAID
        )
        ctx['payment'] = self.payment_provider.order_control_render(self.request, self.object)
        return ctx

    def get_items(self):
        # Returns the order's positions with identical item/variation rows
        # collapsed into one row carrying a count and a total price.
        queryset = self.object.positions.all()

        cartpos = queryset.order_by(
            'item', 'variation'
        ).select_related(
            'item', 'variation'
        ).prefetch_related(
            'variation__values', 'variation__values__prop', 'item__questions',
            'answers'
        )

        # Group items of the same variation
        # We do this by list manipulations instead of a GROUP BY query, as
        # Django is unable to join related models in a .values() query
        def keyfunc(pos):
            # Positions carrying per-attendee data (name asked or question
            # answers) get a unique key (their id) so they are NOT grouped.
            if (pos.item.admission and self.request.event.settings.attendee_names_asked) \
                    or pos.item.questions.all():
                return pos.id, "", "", ""
            return "", pos.item_id, pos.variation_id, pos.price

        positions = []
        for k, g in groupby(sorted(list(cartpos), key=keyfunc), key=keyfunc):
            g = list(g)
            group = g[0]
            group.count = len(g)
            group.total = group.count * group.price
            # k[0] is non-empty exactly for the ungrouped (per-attendee) case.
            group.has_questions = k[0] != ""
            group.cache_answers()
            positions.append(group)

        return {
            'positions': positions,
            'raw': cartpos,
            'total': self.object.total,
            'payment_fee': self.object.payment_fee,
        }
class OrderTransition(OrderView):
    # Handles manual order status transitions triggered from the backend UI.
    permission = 'can_change_orders'
    def post(self, *args, **kwargs):
        """Apply the requested status transition to the order.

        Supported transitions (status codes 'n'=pending, 'p'=paid):
        pending->paid, pending->cancelled, paid->pending, paid->refund.
        Any other combination silently redirects back to the order page.
        """
        to = self.request.POST.get('status', '')
        if self.order.status == 'n' and to == 'p':
            try:
                mark_order_paid(self.order, manual=True)
            except Quota.QuotaExceededException as e:
                # Marking as paid can fail on exceeded quota; surface the message.
                messages.error(self.request, str(e))
            else:
                messages.success(self.request, _('The order has been marked as paid.'))
        elif self.order.status == 'n' and to == 'c':
            # Work on a clone of the order — presumably for versioning
            # (see Order.clone); TODO confirm against the Order model.
            order = self.order.clone()
            order.status = Order.STATUS_CANCELLED
            order.save()
            messages.success(self.request, _('The order has been cancelled.'))
        elif self.order.status == 'p' and to == 'n':
            order = self.order.clone()
            order.status = Order.STATUS_PENDING
            order.payment_manual = True
            order.save()
            messages.success(self.request, _('The order has been marked as not paid.'))
        elif self.order.status == 'p' and to == 'r':
            # Refunds are delegated to the payment provider, which may
            # return a URL to redirect to (e.g. a confirmation page).
            ret = self.payment_provider.order_control_refund_perform(self.request, self.order)
            if ret:
                return redirect(ret)
        return redirect(self.get_order_url())
    def get(self, *args, **kwargs):
        """Render confirmation pages for destructive transitions (GET only)."""
        to = self.request.GET.get('status', '')
        if self.order.status == 'n' and to == 'c':
            return render(self.request, 'pretixcontrol/order/cancel.html', {
                'order': self.order,
            })
        elif self.order.status == 'p' and to == 'r':
            return render(self.request, 'pretixcontrol/order/refund.html', {
                'order': self.order,
                'payment': self.payment_provider.order_control_refund_render(self.order),
            })
        else:
            # No confirmation page exists for this transition.
            return HttpResponse(status=405)
class OrderDownload(OrderView):
@cached_property
def output(self):
responses = register_ticket_outputs.send(self.request.event)
for receiver, response in responses:
provider = response(self.request.event)
if provider.identifier == self.kwargs.get('output'):
return provider
def get(self, request, *args, **kwargs):
if not self.output or not self.output.is_enabled:
messages.error(request, _('You requested an invalid ticket output type.'))
return redirect(self.get_order_url())
if self.order.status != Order.STATUS_PAID:
messages.error(request, _('Order is not pai |
toobaz/pandas | pandas/tests/tslibs/test_period_asfreq.py | Python | bsd-3-clause | 2,130 | 0.000939 | import pytest
from pandas._libs.tslibs.frequencies import get_freq
from pandas._libs.tslibs.period import period_asfreq, period_ordinal
@pytest.mark.parametrize(
    "freq1,freq2,expected",
    [
        ("D", "H", 24),
        ("D", "T", 1440),
        ("D", "S", 86400),
        ("D", "L", 86400000),
        ("D", "U", 86400000000),
        ("D", "N", 86400000000000),
        ("H", "T", 60),
        ("H", "S", 3600),
        ("H", "L", 3600000),
        ("H", "U", 3600000000),
        ("H", "N", 3600000000000),
        ("T", "S", 60),
        ("T", "L", 60000),
        ("T", "U", 60000000),
        ("T", "N", 60000000000),
        ("S", "L", 1000),
        ("S", "U", 1000000),
        # Fixed garbled literal: 1 second = 1_000_000_000 nanoseconds.
        ("S", "N", 1000000000),
        ("L", "U", 1000),
        ("L", "N", 1000000),
        ("U", "N", 1000),
    ],
)
def test_intra_day_conversion_factors(freq1, freq2, expected):
    # One period of freq1 spans `expected` periods of the finer freq2.
    assert period_asfreq(1, get_freq(freq1), get_freq(freq2), False) == expected
@pytest.mark.parametrize(
    "freq,expected", [("A", 0), ("M", 0), ("W", 1), ("D", 0), ("B", 0)]
)
def test_period_ordinal_start_values(freq, expected):
    # Ordinal of the epoch (Jan. 1, 1970) for each frequency.
    assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq(freq)) == expected
@pytest.mark.parametrize(
    "dt,expected",
    [
        ((1970, 1, 4, 0, 0, 0, 0, 0), 1),
        ((1970, 1, 5, 0, 0, 0, 0, 0), 2),
        ((2013, 10, 6, 0, 0, 0, 0, 0), 2284),
        ((2013, 10, 7, 0, 0, 0, 0, 0), 2285),
    ],
)
def test_period_ordinal_week(dt, expected):
    # Weekly ordinals: the week rolls over between Jan 4 and Jan 5, 1970
    # (ordinal 1 -> 2), and likewise between Oct 6 and Oct 7, 2013.
    args = dt + (get_freq("W"),)
    assert period_ordinal(*args) == expected
@pytest.mark.parametrize(
    "day,expected",
    [
        # Thursday (Oct. 3, 2013).
        (3, 11415),
        # Friday (Oct. 4, 2013).
        (4, 11416),
        # Saturday (Oct. 5, 2013).
        (5, 11417),
        # Sunday (Oct. 6, 2013).
        (6, 11417),
        # Monday (Oct. 7, 2013).
        (7, 11417),
        # Tuesday (Oct. 8, 2013).
        (8, 11418),
    ],
)
def test_period_ordinal_business_day(day, expected):
    # Business-day ordinals: the weekend (Sat/Sun) shares the ordinal of
    # the following Monday (11417 for Oct. 5-7, 2013).
    args = (2013, 10, day, 0, 0, 0, 0, 0, get_freq("B"))
    assert period_ordinal(*args) == expected
|
biggihs/python-pptx | tests/test_api.py | Python | mit | 1,552 | 0 | # encoding: utf-8
"""
Test suite for pptx.api module
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import os
import pytest
from pptx.api import Presentation
from pptx.opc.constants import CONTENT_TYPE as CT
from pptx.parts.presentation import PresentationPart
from .unitutil.mock import class_mock, instance_mock
class DescribePresentation(object):
    """Unit tests for the top-level ``Presentation()`` factory function."""

    def it_opens_default_template_on_no_path_provided(self, call_fixture):
        Package_, path, prs_ = call_fixture
        prs = Presentation()
        # With no path argument, the bundled default template must be opened.
        Package_.open.assert_called_once_with(path)
        assert prs is prs_

    # fixtures -------------------------------------------------------

    @pytest.fixture
    def call_fixture(self, Package_, prs_, prs_part_):
        # Path of the default template shipped inside the pptx package.
        path = os.path.abspath(
            os.path.join(
                os.path.split(__file__)[0], '../pptx/templates',
                'default.pptx'
            )
        )
        Package_.open.return_value.main_document_part = prs_part_
        prs_part_.content_type = CT.PML_PRESENTATION_MAIN
        prs_part_.presentation = prs_
        return Package_, path, prs_

    # fixture components ---------------------------------------------

    @pytest.fixture
    def Package_(self, request):
        return class_mock(request, 'pptx.api.Package')

    @pytest.fixture
    def prs_(self, request):
        return instance_mock(request, Presentation)

    @pytest.fixture
    def prs_part_(self, request):
        return instance_mock(request, PresentationPart)
|
bjornwallner/proq2-server | apps/modeller9v8/modlib/modeller/topology.py | Python | gpl-3.0 | 2,065 | 0.002906 | """Classes to handle information from residue topology files"""
import _modeller
__docformat__ = "epytext en"
class Topology(object):
    """All information from a residue topology file. You should never need to
    create a L{Topology} object yourself - one is created for you by the
    L{environ} class, e.g. as C{env.libs.topology}."""

    def __init__(self, libs):
        self.__libs = libs
        self._modpt = _modeller.mod_libraries_tpl_get(libs.modpt)

    def clear(self):
        """Remove all topology information"""
        _modeller.mod_topology_clear(self._modpt, self.__libs.modpt)

    def read(self, file):
        """Reads a residue topology file (discarding any existing data)"""
        self.clear()
        return self.append(file)

    def append(self, file):
        """Appends information from a residue topology file"""
        return _modeller.mod_topology_read(self._modpt, self.__libs.modpt, file)

    def make(self, submodel):
        """Reduces the most detailed topology to a sub-topology model.

        @param submodel: an integer from 1 to 10, to specify the sub-topology
        model as defined in C{models.lib}.
        """
        self.submodel = submodel
        return _modeller.mod_topology_model_make(self._modpt)

    def write(self, file):
        """Writes topology library to a file"""
        return _modeller.mod_topology_model_write(self._modpt,
                                                  self.__libs.modpt, file)

    def __get_submodel(self):
        return _modeller.mod_topology_submodel_get(self._modpt)

    def __set_submodel(self, val):
        _modeller.mod_topology_submodel_set(self._modpt, val)

    def __get_in_memory(self):
        return _modeller.mod_topology_in_memory(self._modpt)

    submodel = property(__get_submodel, __set_submodel,
                        doc="Topology sub-model, an integer from 1 to 10, " + \
                            "as defined in C{models.lib}")
    in_memory = property(__get_in_memory,
                         doc="True if information has been read into memory")
|
class mmpmon(object):
    """Field-name mappings for parsing IBM GPFS ``mmpmon`` output rows.

    ``nodefields``/``fsfields`` map the underscore-delimited mmpmon tokens
    (e.g. ``_br_``) to readable metric names for per-node and per-filesystem
    statistics respectively.
    """
    def __init__(self):
        self.name = 'mmpmon'
        # Per-node I/O statistics fields.
        self.nodefields = { '_n_': 'nodeip', '_nn_': 'nodename',
            '_rc_': 'status', '_t_': 'seconds', '_tu_': 'microsecs',
            '_br_': 'bytes_read', '_bw_': 'bytes_written',
            '_oc_': 'opens', '_cc_': 'closes', '_rdc_': 'reads',
            '_wc_': 'writes', '_dir_': 'readdir', '_iu_': 'inode_updates' }
        self.nodelabels = {}
        # Per-filesystem I/O statistics fields (adds cluster/fs/disk info).
        self.fsfields = { '_n_': 'nodeip', '_nn_': 'nodename',
            '_rc_': 'status', '_t_': 'seconds', '_tu_': 'microsecs',
            '_cl_': 'cluster', '_fs_': 'filesystem', '_d_': 'disks',
            '_br_': 'bytes_read', '_bw_': 'bytes_written',
            '_oc_': 'opens', '_cc_': 'closes', '_rdc_': 'reads',
            '_wc_': 'writes', '_dir_': 'readdir', '_iu_': 'inode_updates' }
        self.fslabels = {}
    def _add_nodes(self, nodelist):
        """Add nodes to the mmpmon nodelist (not yet implemented)."""
        return
    def _reset_stats(self):
        """Reset the IO stats (not yet implemented)."""
        return
|
ttreeagency/PootleTypo3Org | pootle/apps/staticpages/managers.py | Python | gpl-2.0 | 1,705 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Zuza Software Foundation
#
# This file is part of Pootle.
#
# Pootle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from django.db.models import F, Manager, Q
class PageManager(Manager):
    def live(self, user=None, **kwargs):
        """Filters active (live) pages.

        :param user: Current active user. If omitted or the user doesn't
            have administration privileges, only active pages will be
            returned.
        """
        qs = self.get_query_set()
        if user and user.is_superuser:
            return qs
        return qs.filter(active=True)

    def pending_user_agreement(self, user, **kwargs):
        """Filters active pages where the given `user` has pending
        agreements.
        """
        # FIXME: This should be a method exclusive to a LegalPage manager
        changed_since_agreed = Q(agreement__user=user,
                                 modified_on__gt=F('agreement__agreed_on'))
        never_agreed = ~Q(agreement__user=user)
        return self.live().filter(changed_since_agreed | never_agreed)
|
singer-io/singer-python | tests/test_catalog.py | Python | apache-2.0 | 5,436 | 0.002943 | import unittest
from singer.schema import Schema
from singer.catalog import Catalog, CatalogEntry, write_catalog
class TestWriteCatalog(unittest.TestCase):
    """Smoke tests: write_catalog should not raise for simple catalogs."""
    def test_write_empty_catalog(self):
        # A catalog with no streams must serialize without error.
        catalog = Catalog([])
        write_catalog(catalog)
    def test_write_catalog_with_streams(self):
        # A single minimal stream entry must serialize without error.
        catalog = Catalog([CatalogEntry(tap_stream_id='a',schema=Schema(),metadata=[])])
        write_catalog(catalog)
class TestGetSelectedStreams(unittest.TestCase):
    """Tests for Catalog.get_selected_streams()."""

    def test_one_selected_stream(self):
        # Only streams whose metadata marks them selected are returned.
        selected_entry = CatalogEntry(tap_stream_id='a',
                                      schema=Schema(),
                                      metadata=[{'metadata':
                                                 {'selected': True},
                                                 'breadcrumb': []}])
        catalog = Catalog(
            [selected_entry,
             CatalogEntry(tap_stream_id='b', schema=Schema(), metadata=[]),
             CatalogEntry(tap_stream_id='c', schema=Schema(), metadata=[])])
        state = {}
        selected_streams = catalog.get_selected_streams(state)
        # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
        self.assertEqual([e for e in selected_streams], [selected_entry])

    def test_resumes_currently_syncing_stream(self):
        # When state has 'currently_syncing', that stream must come first.
        selected_entry_a = CatalogEntry(tap_stream_id='a',
                                        schema=Schema(),
                                        metadata=[{'metadata':
                                                   {'selected': True},
                                                   'breadcrumb': []}])
        selected_entry_c = CatalogEntry(tap_stream_id='c',
                                        schema=Schema(),
                                        metadata=[{'metadata':
                                                   {'selected': True},
                                                   'breadcrumb': []}])
        catalog = Catalog(
            [selected_entry_a,
             CatalogEntry(tap_stream_id='b', schema=Schema(), metadata=[]),
             selected_entry_c])
        state = {'currently_syncing': 'c'}
        selected_streams = catalog.get_selected_streams(state)
        self.assertEqual([e for e in selected_streams][0], selected_entry_c)
class TestToDictAndFromDict(unittest.TestCase):
    """Round-trip tests between the dict and object forms of a Catalog."""

    # Serialized (dict) representation of the catalog defined below.
    dict_form = {
        'streams': [
            {
                'stream': 'users',
                'tap_stream_id': 'prod_users',
                'stream_alias': 'users_alias',
                'database_name': 'prod',
                'table_name': 'users',
                'schema': {
                    'type': 'object',
                    'selected': True,
                    'properties': {
                        'id': {'type': 'integer', 'selected': True},
                        'name': {'type': 'string', 'selected': True}
                    }
                },
                'metadata': [
                    {
                        'metadata': {
                            'metadata-key': 'metadata-value'
                        },
                        'breadcrumb': [
                            'properties',
                            'name',
                        ],
                    },
                ],
            },
            {
                'stream': 'orders',
                'tap_stream_id': 'prod_orders',
                'database_name': 'prod',
                'table_name': 'orders',
                'schema': {
                    'type': 'object',
                    'selected': True,
                    'properties': {
                        'id': {'type': 'integer', 'selected': True},
                        'amount': {'type': 'number', 'selected': True}
                    }
                }
            }
        ]
    }

    # Equivalent object representation.
    obj_form = Catalog(streams=[
        CatalogEntry(
            stream='users',
            tap_stream_id='prod_users',
            stream_alias='users_alias',
            database='prod',
            table='users',
            schema=Schema(
                type='object',
                selected=True,
                properties={
                    'id': Schema(type='integer', selected=True),
                    'name': Schema(type='string', selected=True)}),
            metadata=[{
                'metadata': {
                    'metadata-key': 'metadata-value'
                },
                'breadcrumb': [
                    'properties',
                    'name',
                ],
            }]),
        CatalogEntry(
            stream='orders',
            tap_stream_id='prod_orders',
            database='prod',
            table='orders',
            schema=Schema(
                type='object',
                selected=True,
                properties={
                    'id': Schema(type='integer', selected=True),
                    'amount': Schema(type='number', selected=True)}))])

    def test_from_dict(self):
        self.assertEqual(self.obj_form, Catalog.from_dict(self.dict_form))

    def test_to_dict(self):
        self.assertEqual(self.dict_form, self.obj_form.to_dict())
class TestGetStream(unittest.TestCase):
    """Catalog.get_stream() must find an entry by its tap_stream_id."""
    def test(self):
        catalog = Catalog(
            [CatalogEntry(tap_stream_id='a'),
             CatalogEntry(tap_stream_id='b'),
             CatalogEntry(tap_stream_id='c')])
        entry = catalog.get_stream('b')
        # assertEqual: assertEquals is a deprecated alias (removed in 3.12).
        self.assertEqual('b', entry.tap_stream_id)
|
PseudoAj/MyInsightRepo | data/examples/consumer/comsumeMsgs.py | Python | mit | 1,513 | 0.00727 | #!/usr/bin/env python
#title :comsumeMsgs.py
#description :The class is a simple cosumer to print message
#author :Ajay Krishna Teja Kavuri
#date :01252017
#version :0.1
#==============================================================================
# Libraries
from kafka import KafkaConsumer
#==============================================================================
# Implementation
class Consumer():
# Initialize
def __init__(self, cnctnAddr):
# Assign the connection
self.cnctnAddr = cnctnAddr
# Have a group id to optimize the consumer
self.grpId = 'test-consumer'
# Assign a topic
self.topi | c = 'electricity'
# Initialize the consumer here
self.consumer = KafkaConsumer(self.topic, group_id=self.grpId, bootstrap_servers=self.cnctnAddr)
# Actual method to start consuming
def consumeMsgs(self):
# Loop through the consumer
for message in self.consumer:
# Print the | consumer
print "Message topic: "+str(message.topic)+" Message partition: "+str(message.partition)+" Message offset: "+str(message.offset)+" Message key: "+str(message.key)+" Message value: "+str(message.value)
# Main entry point for triggering consumption
if __name__ == '__main__':
    # Define the address of the Kafka broker to connect to
    cnctnAddr = '172.31.0.234:9092'
    # Initialize the class
    thisConsumer = Consumer(cnctnAddr)
    # start the consumer (blocks forever reading messages)
    thisConsumer.consumeMsgs()
|
gangadharkadam/saloon_frappe | frappe/patches/v5_0/move_scheduler_last_event_to_system_settings.py | Python | mit | 215 | 0.027907 | import frappe
def execute():
	"""Patch: copy the legacy global 'scheduler_last_event' value into the
	System Settings single doctype."""
	frappe.reload_doctype('System Settings')
	last = frappe.db.get_global('scheduler_last_event')
	frappe.db.set_value('System Settings', 'System Settings', 'scheduler_last_event', last)
|
lsommerer/worms | ui_new_world_dialog.py | Python | gpl-3.0 | 6,133 | 0.001141 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'new_world.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    # Auto-generated by pyuic5 from new_world.ui — prefer editing the .ui
    # file and regenerating over hand-editing this class.
    def setupUi(self, Dialog):
        """Create and lay out all widgets of the New World dialog."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(409, 261)
        Dialog.setModal(True)
        self.verticalLayoutWidget = QtWidgets.QWidget(Dialog)
        self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 385, 231))
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setSpacing(6)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.WorldPop = QtWidgets.QGroupBox(self.verticalLayoutWidget)
        self.WorldPop.setMinimumSize(QtCore.QSize(200, 0))
        self.WorldPop.setObjectName("WorldPop")
        self.human = QtWidgets.QSpinBox(self.WorldPop)
        self.human.setGeometry(QtCore.QRect(140, 20, 42, 20))
        self.human.setButtonSymbols(QtWidgets.QAbstractSpinBox.UpDownArrows)
        self.human.setMinimum(0)
        self.human.setMaximum(100)
        self.human.setSingleStep(1)
        self.human.setProperty("value", 1)
        self.human.setObjectName("human")
        self.computer = QtWidgets.QSpinBox(self.WorldPop)
        self.computer.setGeometry(QtCore.QRect(140, 60, 42, 22))
        self.computer.setButtonSymbols(QtWidgets.QAbstractSpinBox.UpDownArrows)
        self.computer.setMinimum(0)
        self.computer.setMaximum(100)
        self.computer.setSingleStep(1)
        self.computer.setProperty("value", 3)
        self.computer.setObjectName("computer")
        self.heightLabel_3 = QtWidgets.QLabel(self.WorldPop)
        self.heightLabel_3.setGeometry(QtCore.QRect(30, 20, 91, 20))
        self.heightLabel_3.setObjectName("heightLabel_3")
        self.widthLabel_3 = QtWidgets.QLabel(self.WorldPop)
        self.widthLabel_3.setGeometry(QtCore.QRect(30, 60, 101, 20))
        self.widthLabel_3.setObjectName("widthLabel_3")
        self.widthLabel_4 = QtWidgets.QLabel(self.WorldPop)
        self.widthLabel_4.setGeometry(QtCore.QRect(30, 100, 91, 20))
        self.widthLabel_4.setObjectName("widthLabel_4")
        self.wild = QtWidgets.QSpinBox(self.WorldPop)
        self.wild.setGeometry(QtCore.QRect(140, 100, 42, 22))
        self.wild.setButtonSymbols(QtWidgets.QAbstractSpinBox.UpDownArrows)
        self.wild.setMinimum(0)
        self.wild.setMaximum(100)
        self.wild.setSingleStep(1)
        self.wild.setProperty("value", 0)
        self.wild.setObjectName("wild")
        self.human.raise_()
        self.computer.raise_()
        self.heightLabel_3.raise_()
        self.widthLabel_3.raise_()
        self.widthLabel_4.raise_()
        self.wild.raise_()
        self.horizontalLayout_2.addWidget(self.WorldPop)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem)
        self.worldSize = QtWidgets.QGroupBox(self.verticalLayoutWidget)
        self.worldSize.setMinimumSize(QtCore.QSize(150, 0))
        self.worldSize.setMaximumSize(QtCore.QSize(186, 16777215))
        self.worldSize.setObjectName("worldSize")
        self.height = QtWidgets.QSpinBox(self.worldSize)
        self.height.setGeometry(QtCore.QRect(100, 20, 42, 20))
        self.height.setButtonSymbols(QtWidgets.QAbstractSpinBox.UpDownArrows)
        self.height.setMinimum(5)
        self.height.setMaximum(999)
        self.height.setSingleStep(5)
        self.height.setProperty("value", 30)
        self.height.setObjectName("height")
        self.width = QtWidgets.QSpinBox(self.worldSize)
        self.width.setGeometry(QtCore.QRect(100, 60, 42, 22))
        self.width.setButtonSymbols(QtWidgets.QAbstractSpinBox.UpDownArrows)
        self.width.setMinimum(5)
        self.width.setMaximum(999)
        self.width.setSingleStep(5)
        self.width.setProperty("value", 30)
        self.width.setObjectName("width")
        self.heightLabel = QtWidgets.QLabel(self.worldSize)
        self.heightLabel.setGeometry(QtCore.QRect(40, 20, 47, 20))
        self.heightLabel.setObjectName("heightLabel")
        self.widthLabel = QtWidgets.QLabel(self.worldSize)
        self.widthLabel.setGeometry(QtCore.QRect(40, 60, 47, 20))
        self.widthLabel.setObjectName("widthLabel")
        self.horizontalLayout_2.addWidget(self.worldSize)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem1)
        self.verticalLayout_2.addLayout(self.horizontalLayout_2)
        self.buttonBox = QtWidgets.QDialogButtonBox(self.verticalLayoutWidget)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.verticalLayout_2.addWidget(self.buttonBox)
        self.retranslateUi(Dialog)
        self.buttonBox.accepted.connect(Dialog.accept)
        self.buttonBox.rejected.connect(Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Apply (translatable) user-visible texts to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.WorldPop.setTitle(_translate("Dialog", "World Population"))
        self.heightLabel_3.setText(_translate("Dialog", "Human Controlled"))
        self.widthLabel_3.setText(_translate("Dialog", "Computer Controlled"))
        self.widthLabel_4.setText(_translate("Dialog", "Wild (no rules)"))
        self.worldSize.setTitle(_translate("Dialog", "World Size"))
        self.heightLabel.setText(_translate("Dialog", "Height"))
        self.widthLabel.setText(_translate("Dialog", "Width"))
|
ruar18/competitive-programming | algorithms-data-structures/sorting/insertion-sort.py | Python | mit | 493 | 0.002028 | # Impleme | ntation of the insertion-sort sorting algorithm
# O(N^2)
# Insertion sort
def insertionSort(a):
    """Return a new list with the elements of `a` sorted ascending.

    Classic insertion sort, O(N^2) worst case. Fixed: the original aliased
    the input (`final = a`), silently sorting the caller's list in place
    while pretending to return a separate result; we now copy first.
    """
    final = list(a)  # copy so the caller's list is not mutated
    # Loop through every integer to be sorted
    for i in range(1, len(final)):
        key = final[i]
        j = i - 1
        # Shift larger elements right until the insertion point is found
        while j >= 0 and final[j] > key:
            final[j + 1] = final[j]
            j -= 1
        final[j + 1] = key
    return final
# Get array from input: one line of whitespace-separated integers.
array = list(map(int, input().split()))
print(insertionSort(array))
|
malb/pyme | pyme/errors.py | Python | gpl-2.0 | 1,710 | 0.008772 | # $Id$
# Copyright (C) 2004 Igor Belyi <belyi@users.sourceforge.net>
# Copyright (C) 2002 John Goerzen <jgoerzen@complete.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import pygpgme
class GPGMEError(Exception):
    """Exception wrapping a GPGME error value plus an optional context message."""
    def __init__(self, error=None, message=None):
        self.error = error      # combined gpgme error value (source + code)
        self.message = message  # optional caller-supplied context string
    def getstring(self):
        """Return 'source: description', prefixed with the context message if set."""
        message = "%s: %s" % (pygpgme.gpgme_strsource(self.error),
                              pygpgme.gpgme_strerror(self.error))
        if self.message is not None:
            message = "%s: %s" % (self.message, message)
        return message
    def getcode(self):
        """Return the error-code component of the error value."""
        return pygpgme.gpgme_err_code(self.error)
    def getsource(self):
        """Return the error-source component of the error value."""
        return pygpgme.gpgme_err_source(self.error)
    def __str__(self):
        return "%s (%d,%d)" % (self.getstring(), self.getsource(), self.getcode())
# Re-export the gpgme EOF sentinel at module level.
EOF = getattr(pygpgme, "EOF")
def errorcheck(retval, extradata = None):
    # Raise GPGMEError when a gpgme call's return value signals failure
    # (any non-zero value); `extradata` becomes the context message.
    if retval:
        raise GPGMEError(retval, extradata)
|
consultit/Ely | ely/direct/data_structures_and_algorithms/ch02/sequence_iterator.py | Python | lgpl-3.0 | 1,665 | 0.000601 | # Copyright 2013, Michael H. Goldwasser
#
# Developed for use with the book:
#
# Data Structures and Algorithms in Python
# Michael T. Goodrich, Roberto Tamassia, and Michael H. Goldwasser
# John Wiley & Sons, 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class SequenceIterator:
    """An iterator for any of Python's sequence types."""

    def __init__(self, sequence):
        """Create an iterator for the given sequence."""
        self._seq = sequence  # keep a reference to the underlying data
        self._k = -1          # will increment to 0 on first call to next

    def __next__(self):
        """Return the next element, or else raise StopIteration error."""
        self._k += 1          # advance to next index
        if self._k < len(self._seq):
            return self._seq[self._k]
        raise StopIteration()  # there are no more elements

    def __iter__(self):
        """By convention, an iterator must return itself as an iterator."""
        return self
|
open-synergy/opnsynid-hr | hr_attendance_overtime_request_tier_validation/models/__init__.py | Python | agpl-3.0 | 192 | 0 | # -*- coding: utf-8 | -*-
# Copyright 2018 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import hr_overtime_request
from . import tier_definition
|
blueboxgroup/neutron | neutron/plugins/ml2/driver_api.py | Python | apache-2.0 | 35,825 | 0 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
# The following keys are used in the segment dictionaries passed via
# the driver API. These are defined separately from similar keys in
# neutron.extensions.providernet so that drivers don't need to change
# if/when providernet moves to the core API.
#
# The string values mirror the provider network attribute names.
ID = 'id'
NETWORK_TYPE = 'network_type'
PHYSICAL_NETWORK = 'physical_network'
SEGMENTATION_ID = 'segmentation_id'
# The following keys are used in the binding level dictionaries
# available via the binding_levels and original_binding_levels
# PortContext properties.
BOUND_DRIVER = 'bound_driver'
BOUND_SEGMENT = 'bound_segment'
@six.add_metaclass(abc.ABCMeta)
class TypeDriver(object):
    """Define stable abstract interface for ML2 type drivers.
    ML2 type drivers each support a specific network_type for provider
    and/or tenant network segments. Type drivers must implement this
    abstract interface, which defines the API by which the plugin uses
    the driver to manage the persistent type-specific resource
    allocation state associated with network segments of that type.
    Network segments are represented by segment dictionaries using the
    NETWORK_TYPE, PHYSICAL_NETWORK, and SEGMENTATION_ID keys defined
    above, corresponding to the provider attributes. Future revisions
    of the TypeDriver API may add additional segment dictionary
    keys. Attributes not applicable for a particular network_type may
    either be excluded or stored as None.
    """
    @abc.abstractmethod
    def get_type(self):
        """Get driver's network type.
        :returns: network_type value handled by this driver
        """
        pass
    @abc.abstractmethod
    def initialize(self):
        """Perform driver initialization.
        Called after all drivers have been loaded and the database has
        been initialized. No abstract methods defined below will be
        called prior to this method being called.
        """
        pass
    @abc.abstractmethod
    def is_partial_segment(self, segment):
        """Return True if segment is a partially specified segment.
        :param segment: segment dictionary
        :returns: boolean
        """
    @abc.abstractmethod
    def validate_provider_segment(self, segment):
        """Validate attributes of a provider network segment.
        :param segment: segment dictionary using keys defined above
        :raises: neutron.common.exceptions.InvalidInput if invalid
        Called outside transaction context to validate the provider
        attributes for a provider network segment. Raise InvalidInput
        if:
        - any required attribute is missing
        - any prohibited or unrecognized attribute is present
        - any attribute value is not valid
        The network_type attribute is present in segment, but
        need not be validated.
        """
        pass
    @abc.abstractmethod
    def reserve_provider_segment(self, session, segment):
        """Reserve resource associated with a provider network segment.
        :param session: database session
        :param segment: segment dictionary
        :returns: segment dictionary
        Called inside transaction context on session to reserve the
        type-specific resource for a provider network segment. The
        segment dictionary passed in was returned by a previous
        validate_provider_segment() call.
        """
        pass
    @abc.abstractmethod
    def allocate_tenant_segment(self, session):
        """Allocate resource for a new tenant network segment.
        :param session: database session
        :returns: segment dictionary using keys defined above
        Called inside transaction context on session to allocate a new
        tenant network, typically from a type-specific resource
        pool. If successful, return a segment dictionary describing
        the segment. If tenant network segment cannot be allocated
        (i.e. tenant networks not supported or resource pool is
        exhausted), return None.
        """
        pass
    @abc.abstractmethod
    def release_segment(self, session, segment):
        """Release network segment.
        :param session: database session
        :param segment: segment dictionary using keys defined above
        Called inside transaction context on session to release a
        tenant or provider network's type-specific resource. Runtime
        errors are not expected, but raising an exception will result
        in rollback of the transaction.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class NetworkContext(object):
    """Context passed to MechanismDrivers for changes to network resources.
    A NetworkContext instance wraps a network resource. It provides
    helper methods for accessing other relevant information. Results
    from expensive operations are cached so that other
    MechanismDrivers can freely access the same information.
    """
    @abc.abstractproperty
    def current(self):
        """Return the network in its current configuration.
        Return the network, as defined by NeutronPluginBaseV2.
        create_network and all extensions in the ml2 plugin, with
        all its properties 'current' at the time the context was
        established.
        """
        pass
    @abc.abstractproperty
    def original(self):
        """Return the network in its original configuration.
        Return the network, with all its properties set to their
        original values prior to a call to update_network. Method is
        only valid within calls to update_network_precommit and
        update_network_postcommit.
        """
        pass
    @abc.abstractproperty
    def network_segments(self):
        """Return the segments (segment dictionaries) associated with this
        network resource."""
        pass
@six.add_metaclass(abc.ABCMeta)
class SubnetContext(object):
    """Context passed to MechanismDrivers for changes to subnet resources.

    A SubnetContext instance wraps a subnet resource. It provides
    helper methods for accessing other relevant information. Results
    from expensive operations are cached so that other
    MechanismDrivers can freely access the same information.
    """

    @abc.abstractproperty
    def current(self):
        """Return the subnet in its current configuration.

        Return the subnet, as defined by NeutronPluginBaseV2.
        create_subnet and all extensions in the ml2 plugin, with
        all its properties 'current' at the time the context was
        established.
        """
        pass

    @abc.abstractproperty
    def original(self):
        """Return the subnet in its original configuration.

        Return the subnet, with all its properties set to their
        original values prior to a call to update_subnet. Method is
        only valid within calls to update_subnet_precommit and
        update_subnet_postcommit.
        """
        pass
@six.add_metaclass(abc.ABCMeta)
class PortContext(object):
"""Context passed to MechanismDrivers for changes to port resources.
A PortContext instance wraps a port resource. It provides helper
methods for accessing other relevant information. Results from
expensive operations are cached so that other MechanismDrivers can
freely access the same information.
"""
@abc.abstractproperty
def current(self):
"""Return the port in its current configuration.
Return the port, as defined by NeutronPluginBaseV2.
create_port and all extensions in the ml2 plugin, with
|
uraxy/imozzle | api/migrations/0007_auto_20170105_2339.py | Python | mit | 446 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 o | n 2017-01-05 14:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: narrow Feed.meta to a BooleanField defaulting to False."""

    dependencies = [
        ('api', '0006_auto_20161229_1650'),
    ]

    operations = [
        migrations.AlterField(
            model_name='feed',
            name='meta',
            field=models.BooleanField(default=False),
        ),
    ]
|
Mikescher/Project-Euler_Befunge | compiled/Python2/Euler_Problem-020.py | Python | mit | 1,973 | 0.058287 | #!/usr/bin/env python2
# transpiled with BefunCompile v1.3.0 (c) 2017
import sys
import zlib, base64
_g = ("AR+LCAAAAAAABAC9jz0KAjEQha+SnZhmh5hMhogECbZeYrcR0qZK6dkddxVEEGOzrxjmD977mt9AbQMPynCFTDYNus3Jc/XEFW4XwMA1By5yNoRr4wp4sJ7LSFwwxYjm"
+ "cFRNdWla6k5/rOdJD5VDslQ4VCaHMVYnZjIimWcznvpMVuUFgAUm84uA3whQKRkTWXnpjf9N5/2D4NfXX/HVHUV2dgFeAgAA")
# Decode the embedded playfield: the first byte of the base64 payload is
# the number of gzip rounds to undo; the rest is the compressed grid.
g = base64.b64decode(_g)[1:]
for _ in range(ord(base64.b64decode(_g)[0])):
    g = zlib.decompress(g, 16 + zlib.MAX_WBITS)
g = list(map(ord, g))
def gr(x, y):
    """Read playfield cell (x, y); any out-of-bounds read yields 0."""
    if 0 <= x < 101 and 0 <= y < 6:
        return g[y * 101 + x]
    return 0
def gw(x, y, v):
    """Write v into playfield cell (x, y); out-of-bounds writes are ignored."""
    if 0 <= x < 101 and 0 <= y < 6:
        g[y * 101 + x] = v
def td(a, b):
    """Befunge integer division: division by zero yields 0."""
    if b == 0:
        return 0
    return a // b
def tm(a, b):
    """Befunge modulo: modulo by zero yields 0."""
    if b == 0:
        return 0
    return a % b
s = []  # the Befunge data stack


def sp():
    """Pop and return the top of the stack; an empty stack yields 0."""
    global s
    return s.pop() if s else 0


def sa(v):
    """Push v onto the stack."""
    global s
    s.append(v)


def sr():
    """Peek at the top of the stack without popping; empty yields 0."""
    global s
    return s[-1] if s else 0
# State functions _0.._9 are the compiled Befunge control-flow graph; each
# returns the index of the next state (an index >= 10 halts the dispatch
# loop below).  NOTE(review): shebang says python2, so '/' below is assumed
# to be integer floor division — confirm before running under Python 3.
def _0():
    sa(99)
    sa(0)
    return 1
def _1():
    return (6)if(sp()!=0)else(2)
def _2():
    sa(sr());
    gw(0,3,sp())
    gw(1,3,0)
    gw(2,3,199)
    return 3
def _3():
    global t0
    # Multiply one digit of the bignum (stored as ASCII in the playfield)
    # and propagate the carry through gw(1,3,...).
    t0=(((gr((gr(2,3)%100)+1,gr(2,3)/100)-48)*gr(0,3))+gr(1,3))/10
    gw((gr(2,3)%100)+1,gr(2,3)/100,((((gr((gr(2,3)%100)+1,gr(2,3)/100)-48)*gr(0,3))+gr(1,3))%10)+48)
    gw(1,3,t0)
    t0=gr(2,3)-1
    gw(2,3,gr(2,3)-1)
    return 4
def _4():
    global t0
    return (3)if((t0)!=0)else(5)
def _5():
    sa(sp()-1)
    sa(sr());
    sa((0)if(sp()!=0)else(1))
    return 1
def _6():
    global t0
    gw(3,3,199)
    t0=gr((gr(3,3)%100)+1,gr(3,3)/100)-48
    sp();
    return 7
def _7():
    global t1
    t1=gr(3,3)
    gw(3,3,gr(3,3)-1)
    return (9)if((t1)!=0)else(8)
def _8():
    global t0
    # Emit the digit sum (the Euler 20 answer) and halt.
    sys.stdout.write(str(t0)+" ")
    sys.stdout.flush()
    return 10
def _9():
    global t0
    t0=t0+(gr((gr(3,3)%100)+1,gr(3,3)/100)-48)
    return 7
# Dispatch loop: execute states until one returns a halt index.
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9]
c=0
while c<10:
    c=m[c]()
|
dvdmena/BioFormatsRead | test_get_timeseries.py | Python | bsd-2-clause | 2,218 | 0.003607 | import bfimage as bf
from matplotlib import pyplot as plt, cm
import os
import numpy as np
filename = r'testdata/T=5_Z=3_CH=2_CZT_All_CH_per_Slice.czi'
imgbase = os.path.basename(filename)
imgdir = os.path.dirname(filename)
# specify bioformats_package.jar to use if required
#bf.set_bfpath(insert path to bioformats_packe.jar here)
# get image meta-information
MetaInfo = bf.bftools.get_relevant_metainfo_wrapper(filename)
seriesID = 0
timepoint = 2
channel = 1
zplane = 2
# get the actual time series from the data set
tseries = bf.bftools.get_timeseries(filename, MetaInfo['Sizes'], seriesID, zplane)
# show relevant image Meta-Information
print '\n'
print 'Image Directory : ', imgdir
print 'Image Filename : ', imgbase
print 'Images Dim Sizes : ', MetaInfo['Sizes']
print 'Dimension Order BF : ', MetaInfo['DimOrder BF']
print 'Dimension Order CZI : ', MetaInfo['OrderCZI']
print 'Total Series Number : ', MetaInfo['TotalSeries']
print 'Image Dimensions : ', MetaInfo['TotalSeries'], MetaInfo['SizeT'], MetaInfo['SizeZ'], MetaInfo['SizeC'],\
MetaInfo['SizeY'], MetaInfo['SizeX']
print 'Scaling XYZ [micron] : ', MetaInfo['XScale'], MetaInfo['YScale'], MetaInfo['ZScale']
print 'Objective M-NA-Imm : ', MetaInfo['ObjMag'], MetaInfo['NA'], MetaInfo['Immersion']
print 'Objective Name : ', MetaInfo['ObjModel']
print 'Detector Name : ', MetaInfo['DetName']
print 'Ex. Wavelengths [nm] : ', MetaInfo['WLEx']
print 'Em. Wavelengths [nm] : ', MetaInfo['WLEm']
print 'Dyes : ', MetaInfo['Dyes']
print 'Channel Description : ', MetaInfo['ChDesc']
print '============================================================='
print 'Shape Time Series : ', np.shape(tseries)
img2show = tseries[timepoint, chan | nel, :, :]
fig1 = plt.figure(figsize=(10, 8), dpi=100)
ax1 = fig1.add_subplot(111)
cax = ax1.imshow(img2show, interpolation='nearest', cmap=cm.gray, aspect='equal')
ax1.set_title('T=' + str(timepoint+1) + ' Z=' + str(zplane+1) + ' CH=' + str(channel+1), fontsize=12 | )
ax1.set_xlabel('X-dimension [pixel]', fontsize=10)
ax1.set_ylabel('Y-dimension [pixel]', fontsize=10)
cbar = fig1.colorbar(cax)
# show plots
plt.show()
|
deuscoin-org/deuscoin-core | qa/rpc-tests/timestampindex.py | Python | mit | 1,959 | 0.001021 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Deuscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test timestampindex generation and fetching
#
import time

from test_framework.test_framework import DeuscoinTestFramework
from test_framework.util import *
class TimestampIndexTest(DeuscoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self):
self.nodes = []
# Nodes 0/1 are "wallet" nodes
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
| self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-timestampindex"]))
# Nodes 2/3 are used for testing
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-timestampindex"]))
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
self.is_network_split = False
self.sync_all()
def run_test(self):
print "Mining 25 blocks..."
blockhashes = self.nodes[0].generate(25)
time.sleep(3)
print "Mining 25 blocks..."
blockhashes.extend(self.nodes[0].generate(25))
time.sleep(3)
print "Mining 25 blocks..."
blockhashes.extend(self.nodes[0].generate(25))
self.sync_all()
low = self.nodes[1].getblock(blockhashes[0])["time"]
high = low + 76
print "Checking timestamp index..."
hashes = self.nodes[1].getblockhashes(high, low)
assert_equal(len(hashes), len(blockhashes))
assert_equal(hashes, blockhashes)
print "Passed\n"
# Run the test suite directly when executed as a script.
if __name__ == '__main__':
    TimestampIndexTest().main()
|
SamuelLongchamps/grammalecte | gc_core/py/gc_engine.py | Python | gpl-3.0 | 17,150 | 0.008464 | # -*- | encoding: UTF-8 -*-
import re
import sys
import os
import traceback
from ..ibdawg import IBDAWG
from ..echo import echo
from . import gc_options
__all__ = [ "lang", "locales", "pkg", "name", "version", "author", \
"load", "parse", "get | Dictionary", \
"setOptions", "getOptions", "getOptionsLabels", "resetOptions", \
"ignoreRule", "resetIgnoreRules" ]
__version__ = u"${version}"
lang = u"${lang}"
locales = ${loc}
pkg = u"${implname}"
name = u"${name}"
version = u"${version}"
author = u"${author}"
# commons regexes
_zEndOfSentence = re.compile(u'([.?!:;…][ .?!… »”")]*|.$)')
_zBeginOfParagraph = re.compile(u"^\W*")
_zEndOfParagraph = re.compile(u"\W*$")
_zNextWord = re.compile(u" +(\w[\w-]*)")
_zPrevWord = re.compile(u"(\w[\w-]*) +$")
# grammar rules and dictionary
_rules = None
_dOptions = dict(gc_options.dOpt) # duplication necessary, to be able to reset to default
_aIgnoredRules = set()
_oDict = None
_dAnalyses = {} # cache for data from dictionary
_GLOBALS = globals()
#### Parsing
def parse (sText, sCountry="${country_default}", bDebug=False, dOptions=None):
    """Analyse the paragraph sText and return the list of detected errors."""
    sAlt = sText
    dDA = {}
    dOpt = dOptions if dOptions else _dOptions
    # Paragraph-level pass; the text processor may rewrite the paragraph.
    sNew, aErrors = _proofread(sText, sAlt, 0, True, dDA, sCountry, dOpt, bDebug)
    if sNew:
        sText = sNew
    # Sentence-level pass over the (possibly rewritten) paragraph.
    for iStart, iEnd in _getSentenceBoundaries(sText):
        if not (4 < (iEnd - iStart) < 2000):
            continue
        dDA.clear()
        _, errs = _proofread(sText[iStart:iEnd], sAlt[iStart:iEnd], iStart, False, dDA, sCountry, dOpt, bDebug)
        aErrors.extend(errs)
    return aErrors
def _getSentenceBoundaries (sText):
    """Yield (start, end) offsets of each sentence in sText."""
    iPrevEnd = _zBeginOfParagraph.match(sText).end()
    for m in _zEndOfSentence.finditer(sText):
        iEnd = m.end()
        yield (iPrevEnd, iEnd)
        iPrevEnd = iEnd
def _proofread (s, sx, nOffset, bParagraph, dDA, sCountry, dOptions, bDebug):
    """Apply the grammar rules to s; return (sRewrittenText_or_False, lErrors)."""
    aErrs = []
    bChange = False
    if not bParagraph:
        # after the first pass, we modify automatically some characters
        # (the literals below contain NBSP/NNBSP characters — keep them intact)
        if u" " in s:
            s = s.replace(u" ", u' ') # nbsp
            bChange = True
        if u" " in s:
            s = s.replace(u" ", u' ') # nnbsp
            bChange = True
        if u"@" in s:
            s = s.replace(u"@", u' ')
            bChange = True
        if u"'" in s:
            s = s.replace(u"'", u"’")
            bChange = True
        if u"‑" in s:
            s = s.replace(u"‑", u"-") # nobreakdash
            bChange = True
    bIdRule = option('idrule')
    for sOption, lRuleGroup in _getRules(bParagraph):
        # A falsy sOption means the rule group is unconditional.
        if not sOption or dOptions.get(sOption, False):
            for zRegex, bUppercase, sRuleId, lActions in lRuleGroup:
                if sRuleId not in _aIgnoredRules:
                    for m in zRegex.finditer(s):
                        for sFuncCond, cActionType, sWhat, *eAct in lActions:
                            # action in lActions: [ condition, action type, replacement/suggestion/action[, iGroup[, message, URL]] ]
                            try:
                                # Conditions and disambiguators are looked up
                                # by name in the module globals (_GLOBALS).
                                if not sFuncCond or _GLOBALS[sFuncCond](s, sx, m, dDA, sCountry):
                                    if cActionType == "-":
                                        # grammar error
                                        # (text, replacement, nOffset, m, iGroup, sId, bUppercase, sURL, bIdRule)
                                        aErrs.append(_createError(s, sWhat, nOffset, m, eAct[0], sRuleId, bUppercase, eAct[1], eAct[2], bIdRule, sOption))
                                    elif cActionType == "~":
                                        # text processor
                                        s = _rewrite(s, sWhat, eAct[0], m, bUppercase)
                                        bChange = True
                                        if bDebug:
                                            echo(u"~ " + s + " -- " + m.group(eAct[0]) + " # " + sRuleId)
                                    elif cActionType == "=":
                                        # disambiguation
                                        _GLOBALS[sWhat](s, m, dDA)
                                        if bDebug:
                                            echo(u"= " + m.group(0) + " # " + sRuleId + "\nDA: " + str(dDA))
                                    else:
                                        echo("# error: unknown action at " + sRuleId)
                            except Exception as e:
                                # Re-raise with the offending rule id attached.
                                raise Exception(str(e), sRuleId)
    if bChange:
        return (s, aErrs)
    return (False, aErrs)
def _createWriterError (s, sRepl, nOffset, m, iGroup, sId, bUppercase, sMsg, sURL, bIdRule, sOption):
    "error for Writer (LO/OO)"
    xErr = SingleProofreadingError()
    #xErr = uno.createUnoStruct( "com.sun.star.linguistic2.SingleProofreadingError" )
    xErr.nErrorStart = nOffset + m.start(iGroup)
    xErr.nErrorLength = m.end(iGroup) - m.start(iGroup)
    xErr.nErrorType = PROOFREADING
    xErr.aRuleIdentifier = sId
    # suggestions
    # sRepl starting with "=" names a suggestion function in _GLOBALS;
    # "_" means "no suggestion"; anything else is a regex template whose
    # "|"-separated expansions are the suggestions.
    if sRepl[0:1] == "=":
        sugg = _GLOBALS[sRepl[1:]](s, m)
        if sugg:
            if bUppercase and m.group(iGroup)[0:1].isupper():
                xErr.aSuggestions = tuple(map(str.capitalize, sugg.split("|")))
            else:
                xErr.aSuggestions = tuple(sugg.split("|"))
        else:
            xErr.aSuggestions = ()
    elif sRepl == "_":
        xErr.aSuggestions = ()
    else:
        if bUppercase and m.group(iGroup)[0:1].isupper():
            xErr.aSuggestions = tuple(map(str.capitalize, m.expand(sRepl).split("|")))
        else:
            xErr.aSuggestions = tuple(m.expand(sRepl).split("|"))
    # Message
    # Like sRepl, a leading "=" delegates message building to a function.
    if sMsg[0:1] == "=":
        sMessage = _GLOBALS[sMsg[1:]](s, m)
    else:
        sMessage = m.expand(sMsg)
    xErr.aShortComment = sMessage # sMessage.split("|")[0] # in context menu
    xErr.aFullComment = sMessage # sMessage.split("|")[-1] # in dialog
    if bIdRule:
        xErr.aShortComment += " # " + sId
    # URL
    if sURL:
        p = PropertyValue()
        p.Name = "FullCommentURL"
        p.Value = sURL
        xErr.aProperties = (p,)
    else:
        xErr.aProperties = ()
    return xErr
def _createDictError (s, sRepl, nOffset, m, iGroup, sId, bUppercase, sMsg, sURL, bIdRule, sOption):
"error as a dictionary"
dErr = {}
dErr["nStart"] = nOffset + m.start(iGroup)
dErr["nEnd"] = nOffset + m.end(iGroup)
dErr["sRuleId"] = sId
dErr["sType"] = sOption if sOption else "notype"
# suggestions
if sRepl[0:1] == "=":
sugg = _GLOBALS[sRepl[1:]](s, m)
if sugg:
if bUppercase and m.group(iGroup)[0:1].isupper():
dErr["aSuggestions"] = list(map(str.capitalize, sugg.split("|")))
else:
dErr["aSuggestions"] = sugg.split("|")
else:
dErr["aSuggestions"] = ()
elif sRepl == "_":
dErr["aSuggestions"] = ()
else:
if bUppercase and m.group(iGroup)[0:1].isupper():
dErr["aSuggestions"] = list(map(str.capitalize, m.expand(sRepl).split("|")))
else:
dErr["aSuggestions"] = m.expand(sRepl).split("|")
# Message
if sMsg[0:1] == "=":
sMessage = _GLOBALS[sMsg[1:]](s, m)
else:
sMessage = m.expand(sMsg)
dErr["sMessage"] = sMessage
if bIdRule:
dErr["sMessage"] += " # " + sId
# URL
dErr["URL"] = sURL if sURL else ""
return dErr
def _rewrite (s, sRepl, iGroup, m, bUppercase):
"text processor: write sRepl in s at iGroup position"
ln = m.end(iGroup) - m.start(iGroup)
if sRepl == "*":
sNew = " " * ln
elif sRepl == ">" or sRepl == "_" or sRepl == u"~":
sNew = sRepl + " " * (ln-1)
elif sRepl == "@":
sNew = "@" * ln
|
evan-erdos/cboe | pkg/win/gen-data.py | Python | gpl-2.0 | 1,087 | 0.015639 |
from os.path import normpath as makepath
from glob import glob
import sys
root = makepath(sys.argv[1])
# To add directories to the list of sources to generate file lists from,
# simply edit this dictionary. The key is the directory path (relative to
# the build output directory), and the value is either a glob pattern to include,
# or an explicit list of files to include.
# All paths should be in UNIX format, since makepath() will be used to convert them.
# There should be no trailing slashes.
files = {
'data/dialogs': '*.xml',
'data/strings': '*.txt',
'data/fonts': '*.ttf',
'data/shad | ers': ['mask.frag', 'mask.vert'],
'data/graphics': '*.png',
'data/cursors': '*.gif',
'data/sounds': '*.WAV',
}
for path, pattern in files.items():
print 'SetOutPath', '"' + makep | ath("$INSTDIR/" + path + '/') + '"'
if type(pattern) == list:
check_files = [root + '/' + path + '/' + x for x in pattern]
else:
check_files = glob(makepath(root + '/' + path + '/' + pattern))
for fname in check_files:
print 'File', '"' + makepath(fname.replace(root, '${RELEASE_DIR}')) + '"'
|
google-code-export/cocoslive | cocoslive/configuration.py | Python | gpl-3.0 | 1,082 | 0.000924 | #!/usr/bin/python2.5
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""System-wide configuration variables."""
import datetime

# This HTML block will be printed in the footer of every page.
FOOTER_HTML = (
    'cocos Live v0.3.6 - &copy; 2009 <a href="http://www.sapusmedia.com">Sapus Media</a>'
)

# File caching controls
FILE_CACHE_CONTROL = 'private, max-age=86400'
FILE_CACHE_TIME = datetime.timedelta(days=1)

# Title for the website
SYSTEM_TITLE = 'cocos Live'

# Unique identifier from Google Analytics
ANALYTICS_ID = 'UA-871936-6'
|
certik/sympy-oldcore | sympy/plotting/pyglet/media/avbin.py | Python | bsd-3-clause | 16,204 | 0.00253 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2007 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Use avbin to decode audio and video media.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: avbin.py 1199 2007-08-27 11:56:07Z Alex.Holkner $'
from pyglet.media import (MediaFormatException, StreamingSource,
VideoFormat, AudioFormat, AudioData)
from pyglet import gl
from pyglet.gl import gl_info
from pyglet import image
import pyglet.lib
import ctypes
# Load the AVbin shared library; names below mirror the avbin.h C API.
av = pyglet.lib.load_library('avbin',
    darwin='/usr/lib/libavbin.dylib')
# Result codes returned by most AVbin entry points.
AVBIN_RESULT_ERROR = -1
AVBIN_RESULT_OK = 0
AVbinResult = ctypes.c_int
# Stream types reported in AVbinStreamInfo.type.
AVBIN_STREAM_TYPE_UNKNOWN = 0
AVBIN_STREAM_TYPE_VIDEO = 1
AVBIN_STREAM_TYPE_AUDIO = 2
AVbinStreamType = ctypes.c_int
# Audio sample formats.
AVBIN_SAMPLE_FORMAT_U8 = 0
AVBIN_SAMPLE_FORMAT_S16 = 1
AVBIN_SAMPLE_FORMAT_S24 = 2
AVBIN_SAMPLE_FORMAT_S32 = 3
AVBIN_SAMPLE_FORMAT_FLOAT = 4
AVbinSampleFormat = ctypes.c_int
# Log verbosity levels (ffmpeg-style numeric scale).
AVBIN_LOG_QUIET = -8
AVBIN_LOG_PANIC = 0
AVBIN_LOG_FATAL = 8
AVBIN_LOG_ERROR = 16
AVBIN_LOG_WARNING = 24
AVBIN_LOG_INFO = 32
AVBIN_LOG_VERBOSE = 40
AVBIN_LOG_DEBUG = 48
AVbinLogLevel = ctypes.c_int
# Opaque handles and the microsecond timestamp type.
AVbinFileP = ctypes.c_void_p
AVbinStreamP = ctypes.c_void_p
Timestamp = ctypes.c_int64
class AVbinFileInfo(ctypes.Structure):
    # Mirrors AVbinFileInfo in avbin.h.  structure_size must be set to
    # sizeof(AVbinFileInfo) before calling avbin_file_info (see AVbinSource).
    _fields_ = [
        ('structure_size', ctypes.c_size_t),
        ('n_streams', ctypes.c_int),
        ('start_time', Timestamp),
        ('duration', Timestamp),
        ('title', ctypes.c_char * 512),
        ('author', ctypes.c_char * 512),
        ('copyright', ctypes.c_char * 512),
        ('comment', ctypes.c_char * 512),
        ('album', ctypes.c_char * 512),
        ('year', ctypes.c_int),
        ('track', ctypes.c_int),
        ('genre', ctypes.c_char * 32),
    ]
class _AVbinStreamInfoVideo(ctypes.Structure):
    # Video-specific member of the AVbinStreamInfo union.
    _fields_ = [
        ('width', ctypes.c_uint),
        ('height', ctypes.c_uint),
        ('sample_aspect_num', ctypes.c_int),
        ('sample_aspect_den', ctypes.c_int),
    ]
class _AVbinStreamInfoAudio(ctypes.Structure):
    # Audio-specific member of the AVbinStreamInfo union.
    _fields_ = [
        ('sample_format', ctypes.c_int),
        ('sample_rate', ctypes.c_uint),
        ('sample_bits', ctypes.c_uint),
        ('channels', ctypes.c_uint),
    ]
class _AVbinStreamInfoUnion(ctypes.Union):
    _fields_ = [
        ('video', _AVbinStreamInfoVideo),
        ('audio', _AVbinStreamInfoAudio),
    ]
class AVbinStreamInfo(ctypes.Structure):
    # 'type' (an AVBIN_STREAM_TYPE_* value) selects which union member is valid.
    _fields_ = [
        ('structure_size', ctypes.c_size_t),
        ('type', ctypes.c_int),
        ('u', _AVbinStreamInfoUnion)
    ]
class AVbinPacket(ctypes.Structure):
    # NOTE(review): BufferedPacket copies 'data', which suggests the buffer
    # is owned by AVbin and reused on the next read — confirm with avbin docs.
    _fields_ = [
        ('structure_size', ctypes.c_size_t),
        ('timestamp', Timestamp),
        ('stream_index', ctypes.c_int),
        ('data', ctypes.POINTER(ctypes.c_uint8)),
        ('size', ctypes.c_size_t),
    ]
# Explicit ctypes signatures for the AVbin C entry points so argument
# marshalling does not rely on ctypes defaults.
AVbinLogCallback = ctypes.CFUNCTYPE(None,
    ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p)
av.avbin_get_version.restype = ctypes.c_int
av.avbin_get_ffmpeg_revision.restype = ctypes.c_int
av.avbin_get_audio_buffer_size.restype = ctypes.c_size_t
av.avbin_have_feature.restype = ctypes.c_int
av.avbin_have_feature.argtypes = [ctypes.c_char_p]
av.avbin_init.restype = AVbinResult
av.avbin_set_log_level.restype = AVbinResult
av.avbin_set_log_level.argtypes = [AVbinLogLevel]
av.avbin_set_log_callback.argtypes = [AVbinLogCallback]
av.avbin_open_filename.restype = AVbinFileP
av.avbin_open_filename.argtypes = [ctypes.c_char_p]
av.avbin_close_file.argtypes = [AVbinFileP]
av.avbin_seek_file.argtypes = [AVbinFileP, Timestamp]
av.avbin_file_info.argtypes = [AVbinFileP, ctypes.POINTER(AVbinFileInfo)]
av.avbin_stream_info.argtypes = [AVbinFileP, ctypes.c_int,
    ctypes.POINTER(AVbinStreamInfo)]
av.avbin_open_stream.restype = ctypes.c_void_p
av.avbin_open_stream.argtypes = [AVbinFileP, ctypes.c_int]
av.avbin_close_stream.argtypes = [AVbinStreamP]
av.avbin_read.argtypes = [AVbinFileP, ctypes.POINTER(AVbinPacket)]
av.avbin_read.restype = AVbinResult
av.avbin_decode_audio.restype = ctypes.c_int
av.avbin_decode_audio.argtypes = [AVbinStreamP,
    ctypes.c_void_p, ctypes.c_size_t,
    ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)]
av.avbin_decode_video.restype = ctypes.c_int
av.avbin_decode_video.argtypes = [AVbinStreamP,
    ctypes.c_void_p, ctypes.c_size_t,
    ctypes.c_void_p]
def get_version():
    """Return the integer version number of the loaded AVbin library."""
    return av.avbin_get_version()
class AVbinException(MediaFormatException):
    """Raised when AVbin cannot open a file or locate a usable stream."""
    pass
def timestamp_from_avbin(timestamp):
    """Convert an AVbin microsecond timestamp to float seconds."""
    return timestamp / 1000000.0
def timestamp_to_avbin(timestamp):
    """Convert float seconds to AVbin's truncated integer microseconds."""
    return int(1000000 * timestamp)
class BufferedPacket(object):
    """Deep copy of an AVbinPacket whose data buffer AVbin may reuse."""
    def __init__(self, packet):
        self.timestamp = packet.timestamp
        self.stream_index = packet.stream_index
        # Copy the payload into memory we own; the source pointer is only
        # valid until the next avbin_read call.
        self.data = (ctypes.c_uint8 * packet.size)()
        self.size = packet.size
        ctypes.memmove(self.data, packet.data, self.size)
class BufferedImage(object):
    """Pairs a decoded video frame with its presentation timestamp."""
    def __init__(self, image, timestamp):
        self.image = image
        self.timestamp = timestamp
class AVbinSource(StreamingSource):
def __init__(self, filename, file=None):
if file is not None:
raise NotImplementedError('TODO: Load from file stream')
self._file = av.avbin_open_filename(filename)
if not self._file:
raise AVbinException('Could not open "%s"' % filename)
self._video_stream = None
self._audio_stream = None
file_info = AVbinFileInfo()
file_info.structure_size = ctypes.sizeof(file_info)
av.avbin_file_info(self._file, ctypes.byref(file_info))
self._duration = timestamp_from_avbin(file_info.duration)
# Pick the first video and audio streams found, ignore others.
for i in range(file_info.n_streams):
info = AVbinStreamInfo()
info.structure_size = ctypes.sizeof(info)
av.avbin_stream_info(self._file, i, info)
if (info.type == AVBIN_STREAM_TYPE_VIDEO and
not self._video_stream):
stream = av.avbin_open_stream(self._file, i)
if not stream:
continue
self.video_format = VideoFormat(
width=info.u.video.width,
height=info.u.video.height)
if info.u.video.sample_aspect_num != 0:
self.video_format.sample_aspect = (
float(info.u.video.sample_aspect_num) /
info.u.video.sample_aspect_den)
self._video_ |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-2.1/Lib/nntplib.py | Python | mit | 18,078 | 0.002157 | """An NNTP client class based on RFC 977: Network News Transfer Protocol.
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', first + '-' + last)
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'r') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Imports
import re
import socket
__all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError",
"NNTPPermanentError","NNTPProtocolError","NNTPDataError",
"error_reply","error_temp","error_perm","error_proto",
"error_data",]
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
    """Base class for all nntplib exceptions"""
    def __init__(self, *args):
        # Python 2 idiom: forward all positional args to Exception.__init__.
        apply(Exception.__init__, (self,)+args)
        try:
            # By convention the first argument is the raw server reply line.
            self.response = args[0]
        except IndexError:
            self.response = 'No response given'
# Concrete error categories; which one is raised is keyed to the first
# digit of the server reply (see NNTP.getresp()).
class NNTPReplyError(NNTPError):
    """Unexpected [123]xx reply"""
    pass
class NNTPTemporaryError(NNTPError):
    """4xx errors"""
    pass
class NNTPPermanentError(NNTPError):
    """5xx errors"""
    pass
class NNTPProtocolError(NNTPError):
    """Response does not begin with [1-5]"""
    pass
class NNTPDataError(NNTPError):
    """Error in response data"""
    pass
# Aliases kept for backwards compatibility with pre-class-exception releases.
error_reply = NNTPReplyError
error_temp = NNTPTemporaryError
error_perm = NNTPPermanentError
error_proto = NNTPProtocolError
error_data = NNTPDataError
# Standard port used by NNTP servers
NNTP_PORT = 119
# Response numbers that are followed by additional text (e.g. article)
LONGRESP = ['100', '215', '220', '221', '222', '224', '230', '231', '282']
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
# The class itself
class NNTP:
    def __init__(self, host, port=NNTP_PORT, user=None, password=None,
                 readermode=None):
        """Initialize an instance.  Arguments:
        - host: hostname to connect to
        - port: port to connect to (default the standard NNTP port)
        - user: username to authenticate with
        - password: password to use with username
        - readermode: if true, send 'mode reader' command after
                      connecting.
        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'.  If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        self.host = host
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.host, self.port))
        # Buffered file wrapper used for all reads from the server.
        self.file = self.sock.makefile('rb')
        self.debugging = 0
        self.welcome = self.getresp()
        # 'mode reader' is sometimes necessary to enable 'reader' mode.
        # However, the order in which 'mode reader' and 'authinfo' need to
        # arrive differs between some NNTP servers. Try to send
        # 'mode reader', and if it fails with an authorization failed
        # error, try again after sending authinfo.
        readermode_afterauth = 0
        if readermode:
            try:
                self.welcome = self.shortcmd('mode reader')
            except NNTPPermanentError:
                # error 500, probably 'not implemented'
                pass
            except NNTPTemporaryError, e:
                if user and e.response[:3] == '480':
                    # Need authorization before 'mode reader'
                    readermode_afterauth = 1
                else:
                    raise
        if user:
            # RFC 2980 AUTHINFO USER/PASS exchange: 381 means password wanted.
            resp = self.shortcmd('authinfo user '+user)
            if resp[:3] == '381':
                if not password:
                    raise NNTPReplyError(resp)
                else:
                    resp = self.shortcmd(
                        'authinfo pass '+password)
                    if resp[:3] != '281':
                        raise NNTPPermanentError(resp)
            if readermode_afterauth:
                try:
                    self.welcome = self.shortcmd('mode reader')
                except NNTPPermanentError:
                    # error 500, probably 'not implemented'
                    pass
    # Get the welcome message from the server
    # (this is read and squirreled away by __init__()).
    # If the response code is 200, posting is allowed;
    # if it is 201, posting is not allowed.
    def getwelcome(self):
        """Get the welcome message from the server
        (this is read and squirreled away by __init__()).
        If the response code is 200, posting is allowed;
        if it is 201, posting is not allowed."""
        if self.debugging: print '*welcome*', `self.welcome`
        return self.welcome
    def set_debuglevel(self, level):
        """Set the debugging level.  Argument 'level' means:
        0: no debugging output (default)
        1: print commands and responses but not body text etc.
        2: also print raw lines read and sent before stripping CR/LF"""
        self.debugging = level
    # Convenience alias for set_debuglevel.
    debug = set_debuglevel
    def putline(self, line):
        """Internal: send one line to the server, appending CRLF."""
        line = line + CRLF
        if self.debugging > 1: print '*put*', `line`
        # NOTE(review): socket.send may transmit fewer bytes than given;
        # sendall would be safer — confirm before relying on long lines.
        self.sock.send(line)
    def putcmd(self, line):
        """Internal: send one command to the server (through putline())."""
        if self.debugging: print '*cmd*', `line`
        self.putline(line)
    def getline(self):
        """Internal: return one line from the server, stripping CRLF.
        Raise EOFError if the connection is closed."""
        line = self.file.readline()
        if self.debugging > 1:
            print '*get*', `line`
        if not line: raise EOFError
        # Accept CRLF, bare CR or bare LF as line terminators.
        if line[-2:] == CRLF: line = line[:-2]
        elif line[-1:] in CRLF: line = line[:-1]
        return line
    def getresp(self):
        """Internal: get a response from the server.
        Raise various errors if the response indicates an error."""
        resp = self.getline()
        if self.debugging: print '*resp*', `resp`
        # The first digit of the status code classifies the reply:
        # 4xx temporary failure, 5xx permanent failure, 1xx-3xx success.
        c = resp[:1]
        if c == '4':
            raise NNTPTemporaryError(resp)
        if c == '5':
            raise NNTPPermanentError(resp)
        if c not in '123':
            raise NNTPProtocolError(resp)
        return resp
    def getlongresp(self):
        """Internal: get a response plus following text from the server.
        Raise various errors if the response indicates an error."""
        resp = self.getresp()
        if resp[:3] not in LONGRESP:
            raise NNTPReplyError(resp)
        list = []
        while 1:
            line = self.getline()
            if line == '.':
                # A lone dot terminates the multi-line response.
                break
            if line[:2] == '..':
                # Undo RFC 977 dot-stuffing of lines starting with '.'.
                line = line[1:]
            list.append(line)
        return resp, list
    def shortcmd(self, line):
        """Internal: send a command and get the single-line response."""
        self.putcmd(line)
        return self.getresp()
    def longcmd(self, line):
        """Internal: send a command and get the response plus following text."""
        self.putcmd(line)
        return self.getlongresp()
def newgroups(self, date, time):
"""Process a NEWGROUPS command. Arguments:
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of news |
dudunato/ywam-cerrado | demo/migrations/0008_auto_20150509_0023.py | Python | bsd-3-clause | 922 | 0.002169 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add optional feed_image/header_image FKs to FormPage."""

    dependencies = [
        ('wagtailimages', '0005_make_filter_spec_unique'),
        ('demo', '0007_auto_20150508_1856'),
    ]

    operations = [
        migrations.AddField(
            model_name='formpage',
            name='feed_image',
            field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='formpage',
            name='header_image',
            field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailimages.Image', null=True),
            preserve_default=True,
        ),
    ]
|
a10networks/acos-client | acos_client/v30/dns.py | Python | apache-2.0 | 1,880 | 0 | # Copyright (C) 2016, A10 Networks Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from acos_client.v30 import base
class DNS(base.BaseV30):
    """Manage the device's DNS resolver configuration (AXAPI v3.0)."""
    url_prefix = "/ip/dns/"

    def _set_dns(self, precedence, addr):
        """POST the primary/secondary resolver address; ':' marks IPv6."""
        addr_spec = {}
        if ':' in addr:
            addr_spec['ip-v6-addr'] = addr
        else:
            addr_spec['ip-v4-addr'] = addr
        payload = {precedence: addr_spec}
        return self._post(self.url_prefix + precedence, payload)

    def _set_suffix(self, suffix):
        """POST the DNS search suffix (domain name)."""
        payload = {'suffix': {'domain-name': suffix}}
        return self._post(self.url_prefix + 'suffix', payload)

    def set(self, primary=None, secondary=None, suffix=None):
        """Set any combination of resolvers and suffix; None leaves it alone."""
        if primary is not None:
            self._set_dns('primary', primary)
        if secondary is not None:
            self._set_dns('secondary', secondary)
        if suffix is not None:
            self._set_suffix(suffix)

    def delete(self, primary=None, secondary=None, suffix=None):
        """Delete the selected settings (processed in reverse order of set)."""
        if suffix is not None:
            self._delete(self.url_prefix + 'suffix')
        if secondary is not None:
            self._delete(self.url_prefix + 'secondary')
        if primary is not None:
            self._delete(self.url_prefix + 'primary')
|
facetothefate/contrail-controller | src/opserver/analytics_db.py | Python | apache-2.0 | 14,728 | 0.004753 | #
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
#
# analytics_db.py
# Implementation of database purging
#
import redis
import pycassa
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily
from pycassa.types import *
from pycassa import *
from sandesh.viz.constants import *
from sandesh.viz.constants import _NO_AUTO_PURGE_TABLES, \
_FLOW_TABLES, _STATS_TABLES, _MSG_TABLES
from pysandesh.util import UTCTimestampUsec
import code
import urllib | 2
import time
import json
import | datetime
import pdb
import argparse
import socket
import struct
class AnalyticsDb(object):
def __init__(self, logger, cassandra_server_list,
redis_query_port, redis_password, cassandra_user,
cassandra_password):
self._logger = logger
self._cassandra_server_list = cassandra_server_list
self._redis_query_port = redis_query_port
self._redis_password = redis_password
self._pool = None
self._cassandra_user = cassandra_user
self._cassandra_password = cassandra_password
self.connect_db()
self.number_of_purge_requests = 0
# end __init__
def connect_db(self):
try:
creds=None
if self._cassandra_user is not None and \
self._cassandra_password is not None:
creds = {'username':self._cassandra_user,
'password':self._cassandra_password}
self._pool = ConnectionPool(COLLECTOR_KEYSPACE,
server_list=self._cassandra_server_list, timeout=None,
credentials=creds)
except Exception as e:
self._logger.error("Exception: Failure in connection to "
"AnalyticsDb %s" % e)
return -1
return None
# end connect_db
def _get_sysm(self):
creds=None
if self._cassandra_user is not None and \
self._cassandra_password is not None:
creds = {'username':self._cassandra_user,
'password':self._cassandra_password}
for server_and_port in self._cassandra_server_list:
try:
sysm = pycassa.system_manager.SystemManager(server_and_port,
credentials=creds)
except Exception as e:
self._logger.error("Exception: SystemManager failed %s" % e)
continue
else:
return sysm
return None
# end _get_sysm
def _get_analytics_start_time(self):
try:
col_family = ColumnFamily(self._pool, SYSTEM_OBJECT_TABLE)
row = col_family.get(SYSTEM_OBJECT_ANALYTICS)
except Exception as e:
self._logger.error("Exception: analytics_start_time Failure %s" % e)
return None
# Initialize the dictionary before returning
if (SYSTEM_OBJECT_START_TIME not in row):
return None
if (SYSTEM_OBJECT_FLOW_START_TIME not in row):
row[SYSTEM_OBJECT_FLOW_START_TIME] = row[SYSTEM_OBJECT_START_TIME]
if (SYSTEM_OBJECT_STAT_START_TIME not in row):
row[SYSTEM_OBJECT_STAT_START_TIME] = row[SYSTEM_OBJECT_START_TIME]
if (SYSTEM_OBJECT_MSG_START_TIME not in row):
row[SYSTEM_OBJECT_MSG_START_TIME] = row[SYSTEM_OBJECT_START_TIME]
return row
# end _get_analytics_start_time
def _update_analytics_start_time(self, start_times):
try:
col_family = ColumnFamily(self._pool, SYSTEM_OBJECT_TABLE)
col_family.insert(SYSTEM_OBJECT_ANALYTICS, start_times)
except Exception as e:
self._logger.error("Exception: update_analytics_start_time "
"Connection Failure %s" % e)
# end _update_analytics_start_time
def set_analytics_db_purge_status(self, purge_id, purge_cutoff):
try:
redish = redis.StrictRedis(db=0, host='127.0.0.1',
port=self._redis_query_port, password=self._redis_password)
redish.hset('ANALYTICS_DB_PURGE', 'status', 'running')
redish.hset('ANALYTICS_DB_PURGE', 'purge_input', str(purge_cutoff))
redish.hset('ANALYTICS_DB_PURGE', 'purge_start_time',
UTCTimestampUsec())
redish.hset('ANALYTICS_DB_PURGE', 'purge_id', purge_id)
except redis.exceptions.ConnectionError:
self._logger.error("Exception: "
"Failure in connection to redis-server")
response = {'status': 'failed',
'reason': 'Failure in connection to redis-server'}
return response
except redis.exceptions.ResponseError:
self._logger.error("Exception: "
"Redis authentication failed")
response = {'status': 'failed',
'reason': 'Redis authentication failed'}
return response
return None
# end set_analytics_db_purge_status
def delete_db_purge_status(self):
try:
redish = redis.StrictRedis(db=0, host='127.0.0.1',
port=self._redis_query_port, password=self._redis_password)
redish.delete('ANALYTICS_DB_PURGE')
except redis.exceptions.ConnectionError:
self._logger.error("Exception: "
"Failure in connection to redis-server")
except redis.exceptions.ResponseError:
self._logger.error("Exception: "
"Redis authentication failed")
# end delete_db_purge_status
def get_analytics_db_purge_status(self, redis_list):
for redis_ip_port in redis_list:
try:
redish = redis.StrictRedis(redis_ip_port[0],
redis_ip_port[1], db=0,
password=self._redis_password)
if (redish.exists('ANALYTICS_DB_PURGE')):
return redish.hgetall('ANALYTICS_DB_PURGE')
except redis.exceptions.ConnectionError:
self._logger.error("Exception: "
"Failure in connection to redis-server")
response = {'status': 'failed',
'reason': 'Failure in connection to redis-server: '
+ redis_ip_port[0]}
return response
except redis.exceptions.ResponseError:
self._logger.error("Exception: "
"Redis authentication failed")
response = {'status': 'failed',
'reason': 'Redis authentication failed'}
return response
return None
# end get_analytics_db_purge_status
def db_purge(self, purge_cutoff, purge_id):
total_rows_deleted = 0 # total number of rows deleted
purge_error_details = []
if (self._pool == None):
self.connect_db()
if not self._pool:
self._logger.error('Connection to AnalyticsDb has Timed out')
purge_error_details.append('Connection to AnalyticsDb has Timed out')
return (-1, purge_error_details)
sysm = self._get_sysm()
if (sysm == None):
self._logger.error('Failed to connect SystemManager')
purge_error_details.append('Failed to connect SystemManager')
return (-1, purge_error_details)
try:
table_list = sysm.get_keyspace_column_families(COLLECTOR_KEYSPACE)
except Exception as e:
self._logger.error("Exception: Purge_id %s Failed to get "
"Analytics Column families %s" % (purge_id, e))
purge_error_details.append("Exception: Failed to get "
"Analytics Column families %s" % (e))
return (-1, purge_error_details)
# delete entries from message table
msg_table = COLLECTOR_GLOBAL_TABLE
# total number of rows deleted from this table
|
Geoion/pomegranate | tests/test_profile_hmm.py | Python | mit | 17,293 | 0.059099 | from __future__ import (division, print_function)
from pomegranate import *
from nose.tools import with_setup
from nose.tools import assert_equal
from nose.tools import assert_not_equal
import random
import numpy as np
def setup():
'''
Build a model that we want to use to test sequences. This model will
be somewhat complicated, in order to extensively test YAHMM. This will be
a three state global sequence alignment HMM. The HMM models a reference of
'ACT', with pseudocounts to allow for slight deviations from this
reference.
'''
random.seed(0)
global model
model = HiddenMarkovModel( "Global Alignment")
# Define the distribution for insertions
i_d = DiscreteDistribution( { 'A': 0.25, 'C': 0.25, 'G': 0.25, 'T': 0.25 } )
# Create the insert states
i0 = State( i_d, name="I0" )
i1 = State( i_d, name="I1" )
i2 = State( i_d, name="I2" )
i3 = State( i_d, name="I3" )
# Create the match states
m1 = State( DiscreteDistribution({ "A": 0.95, 'C': 0.01, 'G': 0.01, 'T': 0.02 }) , name="M1" )
m2 = State( DiscreteDistribution({ "A": 0.003, 'C': 0.99, 'G': 0.003, 'T': 0.004 }) , name="M2" )
m3 = State( DiscreteDistribution({ "A": 0.01, 'C': 0.01, 'G': 0.01, 'T': 0.97 }) , name="M3" )
# Create the delete states
d1 = State( None, name="D1" )
d2 = State( None, name="D2" )
d3 = State( None, name="D3" )
# Add all the states to the model
model.add_states( [i0, i1, i2, i3, m1, m2, m3, d1, d2, d3 ] )
# Create transitions from match states
model.add_transition( model.start, m1, 0.9 )
model.add_transition( model.start, i0, 0.1 )
model.add_transition( m1, m2, 0.9 )
model.add_transition( m1, i1, 0.05 )
model.add_transition( m1, d2, 0.05 )
model.add_transition( m2, m3, 0.9 )
model.add_transition( m2, i2, 0.05 )
model.add_transition( m2, d3, 0.05 )
model.add_transition( m3, model.end, 0.9 )
model.add_transition( m3, i3, 0.1 )
# Create transitions from insert states
model.add_transition( i0, i0, 0.70 )
model.add_transition( i0, d1, 0.15 )
model.add_transition( i0, m1, 0.15 )
model.add_transition( i1, i1, 0.70 )
model.add_transition( i1, d2, 0.15 )
model.add_transition( i1, m2, 0.15 )
model.add_transition( i2, i2, 0.70 )
model.add_transition( i2, d3, 0.15 )
model.add_transition( i2, m3, 0.15 )
model.add_transition( i3, i3, 0.85 )
model.add_transition( i3, model.end, 0.15 )
# Create transitions from delete states
model.add_transiti | on( d1, d2, 0.15 )
model.add_transition( d1, i1, 0.15 )
model.add_transition( d1, m2, 0.70 )
model.add_transition( d2, d3, 0.15 )
model.add_transition( d2, i2, 0.15 )
model.add_transition( d2, m3, 0.70 )
model.add_transition( d3, i3, 0.30 )
model.add_transition( d3, model.end, 0.70 )
# Call bake to finalize the structure of the model.
model.bake()
def multitransition_setup():
'''
Build a model that we want to use to test sequences. This is the same as the
| above model, except that it uses the multiple transition methods for building.
'''
random.seed(0)
global model
model = HiddenMarkovModel( "Global Alignment")
# Define the distribution for insertions
i_d = DiscreteDistribution( { 'A': 0.25, 'C': 0.25, 'G': 0.25, 'T': 0.25 } )
# Create the insert states
i0 = State( i_d, name="I0" )
i1 = State( i_d, name="I1" )
i2 = State( i_d, name="I2" )
i3 = State( i_d, name="I3" )
# Create the match states
m1 = State( DiscreteDistribution({ "A": 0.95, 'C': 0.01, 'G': 0.01, 'T': 0.02 }) , name="M1" )
m2 = State( DiscreteDistribution({ "A": 0.003, 'C': 0.99, 'G': 0.003, 'T': 0.004 }) , name="M2" )
m3 = State( DiscreteDistribution({ "A": 0.01, 'C': 0.01, 'G': 0.01, 'T': 0.97 }) , name="M3" )
# Create the delete states
d1 = State( None, name="D1" )
d2 = State( None, name="D2" )
d3 = State( None, name="D3" )
# Add all the states to the model
model.add_states( [i0, i1, i2, i3, m1, m2, m3, d1, d2, d3 ] )
# Create transitions from match states
model.add_transitions( model.start, [m1, i0], [0.9, 0.1] )
model.add_transitions( m1, [m2, i1, d2], [0.9, 0.05, 0.05] )
model.add_transitions( m2, [m3, i2, d3], [0.9, 0.05, 0.05] )
model.add_transitions( m3, [model.end, i3], [0.9, 0.1] )
# Create transitions from insert states
model.add_transitions( i0, [i0, d1, m1], [0.7, 0.15, 0.15] )
model.add_transitions( i1, [i1, d2, m2], [0.7, 0.15, 0.15] )
model.add_transitions( i2, [i2, d3, m3], [0.7, 0.15, 0.15] )
model.add_transitions( [i3, i3], [i3, model.end], [0.85, 0.15] )
# Create transitions from delete states
model.add_transitions( d1, [d2, i1, m2], [0.15, 0.15, 0.70] )
model.add_transitions( [d2, d2, d2, d3, d3], [d3, i2, m3, i3, model.end],
[0.15, 0.15, 0.70, 0.30, 0.70 ] )
# Call bake to finalize the structure of the model.
model.bake()
def tied_edge_setup():
'''
Build a model that we want to use to test sequences. This model has
tied edges.
'''
random.seed(0)
global model
model = HiddenMarkovModel( "Global Alignment")
# Define the distribution for insertions
i_d = DiscreteDistribution( { 'A': 0.25, 'C': 0.25, 'G': 0.25, 'T': 0.25 } )
# Create the insert states
i0 = State( i_d, name="I0" )
i1 = State( i_d, name="I1" )
i2 = State( i_d, name="I2" )
i3 = State( i_d, name="I3" )
# Create the match states
m1 = State( DiscreteDistribution({ "A": 0.95, 'C': 0.01, 'G': 0.01, 'T': 0.02 }) , name="M1" )
m2 = State( DiscreteDistribution({ "A": 0.003, 'C': 0.99, 'G': 0.003, 'T': 0.004 }) , name="M2" )
m3 = State( DiscreteDistribution({ "A": 0.01, 'C': 0.01, 'G': 0.01, 'T': 0.97 }) , name="M3" )
# Create the delete states
d1 = State( None, name="D1" )
d2 = State( None, name="D2" )
d3 = State( None, name="D3" )
# Add all the states to the model
model.add_states( [i0, i1, i2, i3, m1, m2, m3, d1, d2, d3 ] )
# Create transitions from match states
model.add_transition( model.start, m1, 0.9 )
model.add_transition( model.start, i0, 0.1 )
model.add_transition( m1, m2, 0.9 )
model.add_transition( m1, i1, 0.05 )
model.add_transition( m1, d2, 0.05 )
model.add_transition( m2, m3, 0.9 )
model.add_transition( m2, i2, 0.05 )
model.add_transition( m2, d3, 0.05 )
model.add_transition( m3, model.end, 0.9 )
model.add_transition( m3, i3, 0.1 )
# Create transitions from insert states
model.add_transition( i0, i0, 0.70, group="i_a" )
model.add_transition( i0, d1, 0.15, group="i_b" )
model.add_transition( i0, m1, 0.15, group="i_c" )
model.add_transition( i1, i1, 0.70, group="i_a" )
model.add_transition( i1, d2, 0.15, group="i_b" )
model.add_transition( i1, m2, 0.15, group="i_c" )
model.add_transition( i2, i2, 0.70, group="i_a" )
model.add_transition( i2, d3, 0.15, group="i_b" )
model.add_transition( i2, m3, 0.15, group="i_c" )
model.add_transition( i3, i3, 0.85, group="i_a" )
model.add_transition( i3, model.end, 0.15 )
# Create transitions from delete states
model.add_transition( d1, d2, 0.15, group="d_a" )
model.add_transition( d1, i1, 0.15, group="d_b" )
model.add_transition( d1, m2, 0.70, group="d_c" )
model.add_transition( d2, d3, 0.15, group="d_a" )
model.add_transition( d2, i2, 0.15, group="d_b" )
model.add_transition( d2, m3, 0.70, group="d_c" )
model.add_transition( d3, i3, 0.30 )
model.add_transition( d3, model.end, 0.70 )
# Call bake to finalize the structure of the model.
model.bake()
def teardown():
'''
Remove the model at the end of the unit testing. Since it is stored in a
global variance, simply delete it.
'''
pass
@with_setup( setup, teardown )
def test_same_length_viterbi():
scores = [ -0.5132449003570658, -11.048101241343396, -9.125519674022627,
-5.0879558788604475 ]
sequences = [ list(x) for x in [ 'ACT', 'GGC', 'GAT', 'ACC' ] ]
for seq, score in zip( sequences, scores ):
assert_equal( model.viterbi( seq )[0], score )
assert_equal( str( model.viterbi( list('XXX') ) ), "(-inf, None)" )
@with_setup( setup, teardown )
def test_variable_length_viterbi():
scores = [ -5.406181012423981, -10.88681993576597, -3.6244718790494277,
-3.644880750680635, -10.674332964640293, -10.393824835172445,
-8.67126440174503, -16.903451796110275, -16.451699654050792 ]
sequences = [ list(x) for x in ('A', 'GA', 'AC', 'AT', 'ATCC',
'ACGTG', 'ATTT', 'TACCCTC', 'TGTCAACA |
guker/spear | config/tools/isv/isv_512g_u100.py | Python | gpl-3.0 | 542 | 0.001845 | #!/usr/bin/env python
import spear
# 1/ The tool
tool = spear.tools.ISVTool
# 2/ GMM Training
n_gaussians = 512
iterk = 25
iterg_train = 25
end_acc = 0.0001
var_thd = 0.0001
update_weights = True
update_means = True
update_variances = True
norm_KMeans = True
# 3/ JFA Training
ru = 100 # The dimensionality of the subspace
rele | vance_factor = 4
n_iter_train = 10
n_iter_enrol = 1
# 4/ JFA Enrolment and scoring
iterg_enrol = 1
convergence_threshold = 0.0001
variance_threshold = 0.0001
relevance_factor = 4
re | sponsibilities_threshold = 0
|
unreal666/outwiker | src/outwiker/pages/wiki/wikiconfig.py | Python | gpl-3.0 | 5,052 | 0 | # -*- coding: utf-8 -*-
from outwiker.core.config import (BooleanOption,
IntegerOption,
StcStyleOption,
StringOption)
from outwiker.gui.stcstyle import StcStyle
class WikiConfig (object):
"""
Класс, хранящий указатели на настройки, связанные с викиы
"""
# Секция конфига для параметров, связанных с викистраницей
WIKI_SECTION = u"Wiki"
# Секция, куда записывать параметры стилей оформления редактора
STYLES_SECTION = u"EditorStyles"
# Имя параметра "Показывать ли код HTML?"
SHOW_HTML_CODE_PARAM = u"ShowHtmlCode"
# Имя параметра для размера превьюшек по умолчанию
THUMB_SIZE_PARAM = u"ThumbSize"
# Имя параметра, показывающего, надо ли выводить список
# прикрепленных файлов вместо пустой страницы
SHOW_ATTACH_BLANK_PARAM = u"ShowAttachInsteadBlank"
# Размер превьюшек по умолчанию
THUMB_SIZE_DEFAULT = 250
# Имя параметра "Стиль ссылок по умолчанию"
LINK_STYLE_PARAM = u"DefaultLinkStyle"
# Стиль ссылок по умолчанию ([[... -> ...]] или [[... | ...]])
LINK_STYLE_DEFAULT = 0
# Стили редактора
STYLE_LINK_PARAM = u"link"
STYLE_LINK_DEFAULT = StcStyle.parse(u"fore:#0000FF,underline")
STYLE_HEADING_PARAM = u"heading"
STYLE_HEADING_DEFAULT = StcStyle.parse(u"bold")
STYLE_COMMAND_PARAM = u"command"
STYLE_COMMAND_DEFAULT = StcStyle.parse(u"fore:#6A686B")
STYL | E_COMMENT_PARAM = u"comment"
STYLE_COMMENT_DEFAULT = StcStyle.parse(u"fore:#12B535")
COLORIZE_SYNTAX_PARAM = u'ColorizeSyntax'
COLORIZE_SYNTAX_DEFAULT = True
RECENT_STYLE_NAME_PARAM = 'RecentStyleName'
RECENT_STYLE_NAME_DEFAULT = ''
def __init__(self, config):
self.config = config
# Показывать вкладку с HTML-кодом?
self.showHtmlCodeOptions = BooleanOption(
self.conf | ig,
WikiConfig.WIKI_SECTION,
WikiConfig.SHOW_HTML_CODE_PARAM,
True)
# Размер превьюшек по умолчанию
self.thumbSizeOptions = IntegerOption(self.config,
WikiConfig.WIKI_SECTION,
WikiConfig.THUMB_SIZE_PARAM,
WikiConfig.THUMB_SIZE_DEFAULT)
# Показывать список прикрепленных файлов вместо пустой страницы?
self.showAttachInsteadBlankOptions = BooleanOption(
self.config,
WikiConfig.WIKI_SECTION,
WikiConfig.SHOW_ATTACH_BLANK_PARAM,
True)
# Стиль ссылок по умолчанию
self.linkStyleOptions = IntegerOption(self.config,
WikiConfig.WIKI_SECTION,
WikiConfig.LINK_STYLE_PARAM,
WikiConfig.LINK_STYLE_DEFAULT)
# Стили редактора
self.link = StcStyleOption(self.config,
WikiConfig.STYLES_SECTION,
WikiConfig.STYLE_LINK_PARAM,
WikiConfig.STYLE_LINK_DEFAULT)
self.heading = StcStyleOption(self.config,
WikiConfig.STYLES_SECTION,
WikiConfig.STYLE_HEADING_PARAM,
WikiConfig.STYLE_HEADING_DEFAULT)
self.command = StcStyleOption(self.config,
WikiConfig.STYLES_SECTION,
WikiConfig.STYLE_COMMAND_PARAM,
WikiConfig.STYLE_COMMAND_DEFAULT)
self.comment = StcStyleOption(self.config,
WikiConfig.STYLES_SECTION,
WikiConfig.STYLE_COMMENT_PARAM,
WikiConfig.STYLE_COMMENT_DEFAULT)
self.colorizeSyntax = BooleanOption(self.config,
self.WIKI_SECTION,
self.COLORIZE_SYNTAX_PARAM,
self.COLORIZE_SYNTAX_DEFAULT)
self.recentStyleName = StringOption(self.config,
self.WIKI_SECTION,
self.RECENT_STYLE_NAME_PARAM,
self.RECENT_STYLE_NAME_DEFAULT)
|
mrquim/mrquimrepo | script.module.nanscrapers/lib/nanscrapers/scraperplugins/projectfreetv.py | Python | gpl-2.0 | 2,949 | 0.015259 | import re
import requests
import xbmc
from ..scraper import Scraper
class projectfreetv(Scraper):
domains = ['project-free-tv.ag']
name = "ProjectFree"
sources = []
def __init__(self):
self.base_link = 'http://project-free-tv.ag'
self.search_movie = self.base_link+'/movies/search-form/?free='
self.search_tv = self.base_link+'/search-tvshows/?free='
def scrape_episode(self, title, show_year, year, season, episode, imdb, tvdb, debrid = False):
try:
start_url= self.search_tv+title.replace(' ','%20')+'%20season%20'+season
html = requests.get(start_url).content
match = re.compile('<td>.+?<a href="(.+?)".+?>(.+?)</a>',re.DOTALL).findall(html)
for url,name in match:
url = self.base_link+url
if title.lower() in name.lower():
seas = re.findall('season (.+?)>',str(name.lower())+'>')[0]
if seas == season:
html2 = requests.get(url).content
match2 = re.compile('<tr><td><a href="(.+?)" >(.+?)</a>',re.DOTALL).findall(html2)
for url2, name2 in match2:
episodes = re.findall('Episode (.+?)>',str(name2)+'>')
for episod in episodes:
if episode == episod:
self.get_source(url2)
return self.sources
except Exception as e:
print repr(e)
pass
return []
# def scrape_movie(self, title, year, imdb, debrid = False):
# try:
# | print 'hi'
# start_url= self.search_movie+title.replace(' ','%20')
# html = requests.get(start_url).content
# match = re | .compile('<div style="float:left.+?href="(.+?)" title="(.+?)"',re.DOTALL).findall(html)
# for url,name in match:
# movie_year = re.findall('\((.+?)\)',str(name))[0]
# url = self.base_link+url
# if movie_year == year:
# print year
# name = re.findall('(.+?) \(',str(name))[0]
# if title.lower() in name.lower():
# print name
# print url
# self.get_source(url)
# return self.sources
# except:
# pass
# return[]
def get_source(self,link):
try:
html = requests.get(link,timeout=3).content
m = re.compile('<td align="center">.+?onclick=.+?callvalue.+?,.+?,\'(.+?)\'',re.DOTALL).findall(html)
for u in m:
name = re.findall('//(.+?)/',str(u))[0]
self.sources.append({'source': name, 'quality': 'SD', 'scraper': self.name, 'url': u,'direct': False})
except:
pass
|
epruesse/ymp | src/ymp/cli/init.py | Python | gpl-3.0 | 3,545 | 0 | "Implements subcommands for ``ymp init``"
import logging
import os
import shutil
import subprocess as sp
import click
import ymp
from ymp.cli.shared_options import command, group
log = logging.getLogger(__name__) # pylint: disable=invalid-name
@group()
def init():
"""Initialize YMP workspace"""
def have_command(cmd):
try:
sp.run("command -v " + cmd, shell=True, stdout=sp.DEVNULL, check=True)
except sp.CalledProcessError:
log.debug("Command '%s' not found", cmd)
return False
log.debug("Command '%s' found", cmd)
return True
@init.command()
@click.option("--yes", "-y", is_flag=True, help="Confirm every prompt")
def cluster(yes):
"""Set up cluster"""
cfg = ymp.get_config()._config
if cfg.cluster.profile is not None and not yes:
click.confirm("Cluster profile '{}' already configured. "
"Do you want to overwrite this setting?"
"".format(cfg.cluster.profile),
abort=True)
log.warning("Trying to detect cluster type")
log.debug("Checking for SLURM")
if have_command("sbatch"):
log.warning("Found SLURM. Updating config.")
cfg["cluster"]["profile"] = "slurm"
elif have_command("qsub"):
log.warning("Detected SGE or Torque")
else:
log.warning("No cluster submit commands found")
cfg["cluster"]["profile"] = None
log.warning("Saving config")
cfg.save()
@init.command()
@click.argument("name", required=False)
@click.option("--yes", "-y", is_flag=Tr | ue, help="Confirm every prompt")
def project(name, | yes):
cfg = ymp.get_config()._config
if not name:
name = click.prompt("Please enter a name for the new project",
type=str)
if name in cfg.projects and not yes:
click.confirm("Project '{}' already configured. "
"Do you want to overwrite this project?"
"".format(name),
abort=True)
cfg.projects[name] = {'data': None}
log.warning("Saving config")
cfg.save()
@init.command()
def demo():
"""
Copies YMP tutorial data into the current working directory
"""
click.echo("Copying tutorial data to current working directory...")
cwd_path = os.getcwd()
cwd_files = os.listdir(cwd_path)
demo_path = os.path.join(ymp._rsc_dir, "demo")
demo_files = os.listdir(demo_path)
conflicts = [f for f in demo_files if f in cwd_files]
if len(cwd_files) > 10:
click.confirm(
"WARNING: "
"The current working directory already contains a lot of files.\n"
" Using an empty directory to start with is highly suggested.\n"
" Do you want to continue?", abort=True)
if conflicts:
click.confirm(
"WARNING: "
"This operation would overwrite the following files: {}\n"
" Do you want to continue?".format(conflicts), abort=True)
for f in conflicts:
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.unlink(f)
for f in demo_files:
src = os.path.join(demo_path, f)
dst = os.path.join(cwd_path, f)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copy(src, dst)
click.echo("done.")
click.echo("")
click.echo("Try running 'ymp make toy.assemble_megahit.map_bbmap',")
click.echo("or see https://ymp.readthedocs.io for more examples.")
|
ericlink/adms-server | playframework-dist/play-1.1/python/Lib/distutils/sysconfig.py | Python | mit | 19,643 | 0.001527 | """Provide access to Python's configuration information. The specific
configuration variables available depend heavily on the platform and
configuration. The values may be retrieved using
get_config_var(name), and the list of variables is available via
get_config_vars().keys(). Additional convenience functions are also
available.
Written by: Fred L. Drake, Jr.
Email: <fdrake@acm.org>
"""
__revision__ = "$Id: sysconfig.py 52234 2006-10-08 17:50:26Z ronald.oussoren $"
import os
import re
import string
import sys
from distutils.errors import DistutilsPlatformError
# These are needed in a couple of spots, so just compute them once.
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
# python_build: (Boolean) if true, we're either building Python or
# building an extension with an un-installed Python, so we use
# different (hard-wired) directories.
argv0_path = os.path.dirname(os.path.abspath(sys.executable))
landmark = os.path.join(argv0_path, "Modules", "Setup")
python_build = os.path.isfile(landmark)
del landmark
def get_python_version():
"""Return a string containing the major and minor Python version,
leaving off the patchlevel. Sample return values could be '1.5'
or '2.2'.
"""
return sys.version[:3]
def get_python_inc(plat_specific=0, prefix=None):
"""Return the directory containing installed Python header files.
If 'plat_specific' is false (the default), this is the path to the
non-platform-specific header files, i.e. Python.h and so on;
otherwise, this is the path to platform-specific header files
(namely pyconfig.h).
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
if python_build:
base = os.path.dirname(os.path.abspath(sys.executable))
if plat_specific:
inc_dir = base
else:
inc_dir = os.path.join(base, "Include")
if not os.path.exists(inc_dir):
inc_dir = os.path.join(os.path.dirname(base), "Include")
return inc_dir
return os.path.join(prefix, "include", "python" + get_python_version())
elif os.name == "nt":
return os.path.join(prefix, "include")
elif os.name == "mac":
if plat_specific:
return os.path.join(prefix, "Mac", "Include")
else:
return os.path.join(prefix, "Include")
elif os.name == "os2":
return os.path.join(prefix, "Include")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its C header files "
"on platform '%s'" % os.name)
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
"""Return the directory containing the Python library (standard or
site additions).
If 'plat_specific' is true, return the directory containing
platform-specific modules, i.e. any module from a non-pure-Python
module distribution; otherwise, return the platform-shared library
directory. If 'standard_lib' is true, return the directory
containing standard Python library modules; otherwise, return the
directory for site-specific modules.
If 'prefix' is supplied, use it instead of sys.prefix or
sys.exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
libpython = os.path.join(prefix,
"lib", "python" + get_python_version())
if standard_lib:
return libpython
else:
return os.path.join(libpython, "site-packages")
elif os.name == "nt":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
if get_python_version() < "2.2":
return prefix
else:
return os.path.join(PREFIX, "Lib", "site-packages")
elif os.name == "mac":
if plat_specific:
if standard_lib:
return os.path.join(prefix, "Lib", "lib-dynload")
else:
return os.path.join(prefix, "Lib", "site-packages")
else:
if standard_lib:
return os.path.join(prefi | x, "Lib")
else:
return os.path.join(prefix, "Lib", "site-packages")
elif os.name == "os2":
if standard_lib:
return os.path.join(PREFIX, "Lib")
else:
return os.path.join(PREFIX, "Lib", "site-packages")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its library "
"on platfo | rm '%s'" % os.name)
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
Mainly needed on Unix, so we can plug in the information that
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
(cc, cxx, opt, cflags, ccshared, ldshared, so_ext) = \
get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SO')
if os.environ.has_key('CC'):
cc = os.environ['CC']
if os.environ.has_key('CXX'):
cxx = os.environ['CXX']
if os.environ.has_key('LDSHARED'):
ldshared = os.environ['LDSHARED']
if os.environ.has_key('CPP'):
cpp = os.environ['CPP']
else:
cpp = cc + " -E" # not always
if os.environ.has_key('LDFLAGS'):
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if os.environ.has_key('CFLAGS'):
cflags = opt + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if os.environ.has_key('CPPFLAGS'):
cpp = cpp + ' ' + os.environ['CPPFLAGS']
cflags = cflags + ' ' + os.environ['CPPFLAGS']
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
cc_cmd = cc + ' ' + cflags
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc)
compiler.shared_lib_extension = so_ext
def get_config_h_filename():
"""Return full pathname of installed pyconfig.h file."""
if python_build:
inc_dir = argv0_path
else:
inc_dir = get_python_inc(plat_specific=1)
if get_python_version() < '2.2':
config_h = 'config.h'
else:
# The name of the config.h file changed in 2.2
config_h = 'pyconfig.h'
return os.path.join(inc_dir, config_h)
def get_makefile_filename():
"""Return full pathname of installed Makefile from the Python build."""
if python_build:
return os.path.join(os.path.dirname(sys.executable), "Makefile")
lib_dir = get_python_lib(plat_specific=1, standard_lib=1)
return os.path.join(lib_dir, "config", "Makefile")
def parse_config_h(fp, g=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if g is None:
g = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
#
while 1:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try: v = int(v)
except ValueError: pass
g[n] = v
else:
m = undef_rx.match(line)
if m: |
akabos/NearPy | nearpy/utils/utils.py | Python | mit | 2,882 | 0.000694 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 Ole Krause-Sparmann
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import numpy
import scipy
def numpy_array_from_list_or_numpy_array(vec | tors):
"""
Returns numpy array representation of argument.
Argument maybe numpy array (input is returned)
or a list of numpy vectors.
"""
# If vectors is not a numpy matrix, create one
if not isinstance(vectors, numpy.ndarray):
V = numpy.zeros( | (vectors[0].shape[0], len(vectors)))
for index in range(len(vectors)):
vector = vectors[index]
V[:, index] = vector
return V
return vectors
def unitvec(vec):
"""
Scale a vector to unit length. The only exception is the zero vector, which
is returned back unchanged.
"""
if scipy.sparse.issparse(vec): # convert scipy.sparse to standard numpy array
vec = vec.tocsr()
veclen = numpy.sqrt(numpy.sum(vec.data ** 2))
if veclen > 0.0:
return vec / veclen
else:
return vec
if isinstance(vec, numpy.ndarray):
vec = numpy.asarray(vec, dtype=float)
veclen = numpy.linalg.norm(vec)
if veclen > 0.0:
return vec / veclen
else:
return vec
def perform_pca(A):
"""
Computes eigenvalues and eigenvectors of covariance matrix of A.
The rows of a correspond to observations, the columns to variables.
"""
# First subtract the mean
M = (A-numpy.mean(A.T, axis=1)).T
# Get eigenvectors and values of covariance matrix
return numpy.linalg.eig(numpy.cov(M))
PY2 = sys.version_info[0] == 2

# Under Python 2 raw byte strings are plain ``str``; under Python 3, ``bytes``.
bytes_type = str if PY2 else bytes


def want_string(arg, encoding='utf-8'):
    """Return *arg* as text, decoding byte strings with *encoding*."""
    if isinstance(arg, bytes_type):
        return arg.decode(encoding)
    return arg
|
pbrod/numpy | numpy/linalg/tests/test_linalg.py | Python | bsd-3-clause | 74,497 | 0.00055 | """ Test functions for linalg module
"""
import os
import sys
import itertools
import traceback
import textwrap
import subprocess
import pytest
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
from numpy import multiply, atleast_2d, inf, asarray
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, suppress_warnings,
assert_raises_regex, HAS_LAPACK64,
)
from numpy.testing._private.utils import requires_memory
def consistent_subclass(out, in_):
    """Check that *out* has the subclass we expect given the input.

    For ndarray subclass input the output must be of the exact same
    subclass; non-ndarray input gets converted to a plain ndarray.
    """
    expected = type(in_) if isinstance(in_, np.ndarray) else np.ndarray
    return type(out) is expected
# Keep a handle on numpy.testing's assert_almost_equal before shadowing it below.
old_assert_almost_equal = assert_almost_equal
def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):
    # Precision-aware wrapper: single-precision inputs are compared with
    # fewer decimals than double-precision ones.
    if asarray(a).dtype.type in (single, csingle):
        decimal = single_decimal
    else:
        decimal = double_decimal
    old_assert_almost_equal(a, b, decimal=decimal, **kw)
def get_real_dtype(dtype):
    """Map a test dtype to its real-valued counterpart."""
    mapping = {single: single, double: double,
               csingle: single, cdouble: double}
    return mapping[dtype]


def get_complex_dtype(dtype):
    """Map a test dtype to its complex-valued counterpart."""
    mapping = {single: csingle, double: cdouble,
               csingle: csingle, cdouble: cdouble}
    return mapping[dtype]


def get_rtol(dtype):
    """Return a safe relative tolerance for the given dtype's precision."""
    return 1e-5 if dtype in (single, csingle) else 1e-11
# used to categorize tests; each LinalgCase carries a subset of these tags
all_tags = {
    'square', 'nonsquare', 'hermitian', # mutually exclusive
    'generalized', 'size-0', 'strided' # optional additions
}
class LinalgCase:
    """A named bundle of operands (a, b) plus tags used to filter tests."""

    def __init__(self, name, a, b, tags=frozenset()):
        """
        A bundle of arguments to be passed to a test case, with an identifying
        name, the operands a and b, and a set of tags to filter the tests.

        The default for *tags* is an (immutable) frozenset rather than the
        mutable-default ``set()`` anti-pattern; behavior is unchanged.
        """
        assert_(isinstance(name, str))
        self.name = name
        self.a = a
        self.b = b
        self.tags = frozenset(tags)  # prevent shared tags

    def check(self, do):
        """
        Run the function `do` on this test case, expanding arguments
        """
        do(self.a, self.b, tags=self.tags)

    def __repr__(self):
        return f'<LinalgCase: {self.name}>'
def apply_tag(tag, cases):
    """
    Add the given tag (a string) to each of the cases (a list of LinalgCase
    objects) and return the list.
    """
    assert tag in all_tags, "Invalid tag"
    for case in cases:
        case.tags = case.tags.union({tag})
    return cases
#
# Base test cases
#
np.random.seed(1234)
CASES = []
# square test cases
CASES += apply_tag('square', [
LinalgCase("single",
array([[1., 2.], [3., 4.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("double",
array([[1., 2.], [3., 4.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_2",
array([[1., 2.], [3., 4.]], dtype=double),
array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
LinalgCase("csingle",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("cdouble",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_2",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)),
LinalgCase("0x0",
np.empty((0, 0), dtype=double),
np.empty((0,), dtype=double),
tags={'size-0'}),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
LinalgCase("1x1",
np.random.rand(1, 1),
np.random.rand(1)),
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
])
# non-square test-cases
# non-square test-cases (garbled extraction artifacts in csingle_nsq_2
# repaired to match the parallel cdouble_nsq_2 entry below)
CASES += apply_tag('nonsquare', [
    LinalgCase("single_nsq_1",
               array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
               array([2., 1.], dtype=single)),
    LinalgCase("single_nsq_2",
               array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
               array([2., 1., 3.], dtype=single)),
    LinalgCase("double_nsq_1",
               array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
               array([2., 1.], dtype=double)),
    LinalgCase("double_nsq_2",
               array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
               array([2., 1., 3.], dtype=double)),
    LinalgCase("csingle_nsq_1",
               array(
                   [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle),
               array([2. + 1j, 1. + 2j], dtype=csingle)),
    LinalgCase("csingle_nsq_2",
               array(
                   [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle),
               array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)),
    LinalgCase("cdouble_nsq_1",
               array(
                   [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
               array([2. + 1j, 1. + 2j], dtype=cdouble)),
    LinalgCase("cdouble_nsq_2",
               array(
                   [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
               array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)),
    LinalgCase("cdouble_nsq_1_2",
               array(
                   [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
               array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
    LinalgCase("cdouble_nsq_2_2",
               array(
                   [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
               array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
    LinalgCase("8x11",
               np.random.rand(8, 11),
               np.random.rand(8)),
    LinalgCase("1x5",
               np.random.rand(1, 5),
               np.random.rand(1)),
    LinalgCase("5x1",
               np.random.rand(5, 1),
               np.random.rand(5)),
    LinalgCase("0x4",
               np.random.rand(0, 4),
               np.random.rand(0),
               tags={'size-0'}),
    LinalgCase("4x0",
               np.random.rand(4, 0),
               np.random.rand(4),
               tags={'size-0'}),
])
# hermitian test-cases
CASES += apply_tag('hermitian', [
LinalgCase("hsingle",
array([[1., 2.], [2., 1.]], dtype=single),
None),
LinalgCase("hdouble",
array([[1., 2.], [2., 1.]], dtype=double),
None),
LinalgCase("hcsingle",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle),
None),
LinalgCase("hcdouble",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble),
None),
LinalgCase("hempty",
np.empty((0, 0), dtype=double),
None,
tags={'size-0'}),
LinalgCase("hnonarray",
[[1, 2], [2, 1]],
None),
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
])
#
# Gufunc test cases
#
def _make_generalized_cases():
new_cases = []
for case in CASES:
if not isinstance(case.a, np.ndarray):
continue
a = np.array([case.a, 2 * case.a, 3 * case.a])
if case.b is None:
b = None
else:
b = np.array([case.b, 7 * case.b, 6 * case.b])
new_case = LinalgCase(case.name + "_tile3", a, b,
tags=case.tags | {'generalized'})
new_cases.append(new_case)
a = np.array([case.a] * 2 * 3).reshape((3, 2) + |
gobstones/PyGobstones-Lang | tests/utils.py | Python | gpl-3.0 | 3,644 | 0.005488 | import importlib
import os
import subprocess
import math
import itertools
import random
def eqValue(gbsv, pyv):
    """Return True when the Gobstones value string equals str(pyv)."""
    return gbsv == str(pyv)


def delete_files_in_dir(dir, exceptions=()):
    """Remove every file in *dir* except the names listed in *exceptions*.

    The default is an immutable tuple: the previous mutable ``[]`` default
    is a shared-state anti-pattern (harmless here, but fragile).
    """
    for f in os.listdir(dir):
        if f not in exceptions:
            os.remove(os.path.join(dir, f))


def group(lst, n):
    """Split *lst* into consecutive chunks of length *n* (last may be shorter)."""
    res = []
    sublst = []
    for x in lst:
        sublst.append(x)
        if len(sublst) == n:
            res.append(sublst)
            sublst = []
    if sublst:
        res.append(sublst)
    return res


def flatten(lst):
    """Recursively flatten nested lists into one flat list."""
    res = []
    for x in lst:
        if isinstance(x, list):
            res.extend(flatten(x))
        else:
            res.append(x)
    return res
def parent_dir(f):
    """Return the absolute directory containing file *f*."""
    return os.path.split(os.path.realpath(f))[0]
def module_dir(obj):
    # NOTE(review): *obj* is ignored; this always returns the directory of
    # THIS module, not of obj's module -- confirm that is intended.
    return os.path.dirname(__file__)
def dir_has_tests(dir):
    # A directory "has tests" when any entry name contains the substring "test".
    for fn in os.listdir(os.path.join(parent_dir(__file__), dir)):
        if "test" in fn:
            return True
    return False
def is_module(module_name):
    """Return True when *module_name* can be imported, False otherwise."""
    try:
        importlib.import_module(module_name)
    except Exception:
        # Deliberately broad: any failure during import means "not usable".
        return False
    return True


def run_cmd(cmd):
    """Run *cmd* and return communicate()'s (stdout, stderr) pair.

    stderr is not captured, so the second element is always None.
    """
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    return proc.communicate()
def append_file(fn, s):
    """Append string *s* to file *fn*."""
    # 'with' guarantees the handle is closed; the old code called ``f.close``
    # WITHOUT parentheses, so the file was never actually closed.
    with open(fn, "a") as f:
        f.write(s)


def read_file(fn):
    """Return the full contents of file *fn*."""
    with open(fn, 'r') as f:
        return f.read()


def read_file_lines(fn):
    """Return the list of lines of file *fn* (same missing-parentheses close bug fixed)."""
    with open(fn, 'r') as f:
        return f.readlines()


def write_file(fn, s):
    """(Over)write file *fn* with string *s*."""
    with open(fn, "w") as f:
        f.write(s)


def copy_file(fn, fnnew):
    """Copy the contents of *fn* into *fnnew*."""
    write_file(fnnew, read_file(fn))


def temp_test_file(codestr):
    """Write *codestr* to an id()-named .gbs file under examples/ and return its path."""
    fn = os.path.dirname(__file__) + "/examples/" + str(id(codestr)) + ".gbs"
    write_file(fn, codestr)
    return fn
def unzip(l):
    """Inverse of zip: [(a1, b1), (a2, b2)] -> [[a1, a2], [b1, b2]]."""
    return [list(t) for t in zip(*l)]


def first_half(lst):
    """Return the first half of *lst* (the shorter half when len is odd)."""
    # // keeps the index an int; '/' yields a float and raises TypeError
    # when slicing under Python 3.
    return lst[:len(lst) // 2]


def second_half(lst):
    """Return the second half of *lst* (the longer half when len is odd)."""
    return lst[len(lst) // 2:]
def all_permutations(xs):
    """Yield every permutation of *xs* as a list, in first-choice order.

    itertools.permutations picks the leftmost element first and recurses on
    the remainder, which is exactly the order the hand-rolled recursion used.
    """
    for perm in itertools.permutations(xs):
        yield list(perm)


def all_subsets(xs):
    """Yield every subset of *xs*; subsets of the tail precede those containing xs[0]."""
    if not xs:
        yield []
        return
    first, rest = xs[0], xs[1:]
    for sub in all_subsets(rest):
        yield sub
        yield [first] + sub


def all_slicings(xs):
    """Yield every way of cutting *xs* into consecutive non-empty slices."""
    if not xs:
        yield []
        return
    if len(xs) == 1:
        yield [xs]
        return
    head, tail = xs[0], xs[1:]
    for slicing in all_slicings(tail):
        yield [[head]] + slicing
        yield [[head] + slicing[0]] + slicing[1:]
def ifloor(f):
    """math.floor of *f*, as an int."""
    return int(math.floor(f))


def iceil(f):
    """math.ceil of *f*, as an int."""
    return int(math.ceil(f))


# Uniform integer in the half-open range [0, x).
randint = lambda x: random.randint(0, x - 1)


def randomList(generator, max_size=16):
    """Build a list of random length (4 .. max_size + 3) filled by generator(i)."""
    return [generator(i) for i in range(randint(max_size) + 4)]


def randomIntList(max_size=16, max_number=99):
    """Random-length list of random ints in [0, max_number)."""
    return randomList(lambda i: randint(max_number), max_size)
def nats(start, end):
    """Return the inclusive range start..end, descending when start > end."""
    if start < end:
        return list(range(start, end + 1))
    l = list(range(end, start + 1))
    l.reverse()
    return l


BINOPS = {
    "+": lambda x, y: x + y,
    "-": lambda x, y: x - y,
    "*": lambda x, y: x * y,
    # Gobstones div/mod are integer operations; floor division keeps the
    # result integral under Python 3 (plain '/' would produce a float).
    "div": lambda x, y: x // y,
    "mod": lambda x, y: x % y,
}

binop = lambda op, x, y: BINOPS[op](x, y)
# Gbs syntax
isEmpty = lambda xs: len(xs) == 0
head = lambda xs: xs[0]
tail = lambda xs: xs[1:]
# Test scripts
def combine_args(args):
prod = itertools.product(*args.values())
return [dict(zip(args.keys(),pargs)) for pargs in prod]
COLORS = ["Azul", "Negro", "Rojo", "Verde"]
DIRS = ["Norte", "Este", "Sur", "O | este"]
BOOLS = ["True", "False"] |
Andrwe/py3status | py3status/modules/wanda_the_fish.py | Python | bsd-3-clause | 5,569 | 0.001437 | # -*- coding: utf-8 -*-
"""
Display a fortune-telling, swimming fish.
Wanda has no use what-so-ever. It only takes up disk space and compilation time,
and if loaded, it also takes up precious bar space, memory, and cpu cycles.
Anybody found using it should be promptly sent for a psychiatric evaluation.
Configuration parameters:
cache_timeout: refresh interval for this module (default 0)
format: display format for this module
(default '{nomotion}[{fortune} ]{wanda}{motion}')
fortune_timeout: refresh interval for fortune (default 60)
Format placeholders:
{fortune} one of many aphorisms or vague prophecies
{wanda} name of one of the most commonly kept freshwater aquarium fish
{motion} biologically propelled motion through a liquid medium
{nomotion} opposit | e behavior of motion to prevent modules from shifting
Optional:
fortune-mod: the fortune cookie program from bsd games
Examples:
```
# disable motions when not in use
wanda_the_fish {
format = '[\?if=fortune {nomotion}][{fortune} ]'
format += '{wanda}[\?if=fortune | {motion}]'
}
# no updates, no motions, yes fortunes, you click
wanda_the_fish {
format = '[{fortune} ]{wanda}'
cache_timeout = -1
}
# wanda moves, fortunes stays
wanda_the_fish {
format = '[{fortune} ]{nomotion}{wanda}{motion}'
}
# wanda is swimming too fast, slow down wanda
wanda_the_fish {
cache_timeout = 2
}
```
@author lasers
SAMPLE OUTPUT
[
{'full_text': 'innovate, v.: To annoy people.'},
{'full_text': ' <', 'color': '#ffa500'},
{'full_text': '\xba', 'color': '#add8e6'},
{'full_text': ',', 'color': '#ff8c00'},
{'full_text': '))', 'color': '#ffa500'},
{'full_text': '))>< ', 'color': '#ff8c00'},
]
idle
[
{'full_text': ' <', 'color': '#ffa500'},
{'full_text': '\xba', 'color': '#add8e6'},
{'full_text': ',', 'color': '#ff8c00'},
{'full_text': '))', 'color': '#ffa500'},
{'full_text': '))>3', 'color': '#ff8c00'},
]
py3status
[
{'full_text': 'py3status is so cool!'},
{'full_text': ' <', 'color': '#ffa500'},
{'full_text': '\xba', 'color': '#add8e6'},
{'full_text': ',', 'color': '#ff8c00'},
{'full_text': '))', 'color': '#ffa500'},
{'full_text': '))>< ', 'color': '#ff8c00'},
]
"""
from time import time
class Py3status:
    """
    Animated swimming-fish widget: cycles Wanda through four tail frames and
    optionally shows a fortune next to her (toggled by clicking).
    """
    # available configuration parameters
    cache_timeout = 0
    format = "{nomotion}[{fortune} ]{wanda}{motion}"
    fortune_timeout = 60
    def post_config_hook(self):
        # Fish body template; %s is the tail fin that changes per frame.
        body = (
            "[\?color=orange&show <"
            "[\?color=lightblue&show º]"
            "[\?color=darkorange&show ,]))"
            "[\?color=darkorange&show ))>%s]]"
        )
        # Four animation frames differing only in the tail character.
        wanda = [body % fin for fin in ("<", ">", "<", "3")]
        self.wanda = [self.py3.safe_format(x) for x in wanda]
        self.wanda_length = len(self.wanda)
        self.index = 0
        self.fortune_command = ["fortune", "-as"]
        # Restore persisted state so the widget survives py3status restarts.
        self.fortune = self.py3.storage_get("fortune") or None
        self.toggled = self.py3.storage_get("toggled") or False
        self.motions = {"motion": " ", "nomotion": ""}
        # deal with {new,old} timeout between storage
        fortune_timeout = self.py3.storage_get("fortune_timeout")
        timeout = None
        if self.fortune_timeout != fortune_timeout:
            timeout = time() + self.fortune_timeout
        self.time = (
            timeout or self.py3.storage_get("time") or (time() + self.fortune_timeout)
        )
    def _set_fortune(self, state=None, new=False):
        # Update self.fortune: *state* toggles fortunes on/off, *new* forces
        # a fresh fortune; with neither, refresh only when toggled on and the
        # refresh deadline has passed.
        if not self.fortune_command:
            return
        if new:
            try:
                fortune_data = self.py3.command_output(self.fortune_command)
            except self.py3.CommandError:
                # fortune binary unavailable: blank out and stop retrying
                self.fortune = ""
                self.fortune_command = None
            else:
                self.fortune = " ".join(fortune_data.split())
                self.time = time() + self.fortune_timeout
        elif state is None:
            if self.toggled and time() >= self.time:
                self._set_fortune(new=True)
        else:
            self.toggled = state
            if state:
                self._set_fortune(new=True)
            else:
                self.fortune = None
    def _set_motion(self):
        # Alternate each placeholder between "" and " " to fake swimming.
        for k in self.motions:
            self.motions[k] = "" if self.motions[k] else " "
    def _set_wanda(self):
        # Advance to the next animation frame, wrapping around.
        self.index += 1
        if self.index >= self.wanda_length:
            self.index = 0
    def wanda_the_fish(self):
        # py3status entry point: produce this tick's bar output.
        self._set_fortune()
        self._set_motion()
        self._set_wanda()
        return {
            "cached_until": self.py3.time_in(self.cache_timeout),
            "full_text": self.py3.safe_format(
                self.format,
                {
                    "fortune": self.fortune,
                    "motion": self.motions["motion"],
                    "nomotion": self.motions["nomotion"],
                    "wanda": self.wanda[self.index],
                },
            ),
        }
    def kill(self):
        # Persist state so the toggle/fortune survive a restart.
        self.py3.storage_set("toggled", self.toggled)
        self.py3.storage_set("fortune", self.fortune)
        self.py3.storage_set("fortune_timeout", self.fortune_timeout)
        self.py3.storage_set("time", self.time)
    def on_click(self, event):
        # Any click toggles the fortune display on or off.
        if not self.fortune_command:
            return
        self._set_fortune(not self.toggled)
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
macloo/web-scraper-steps | MLS_scraper_4.py | Python | mit | 1,865 | 0.009115 | from urllib.request import urlopen
from bs4 import BeautifulSoup
import time
# time will allow a delay in program execution
# added 2 time delays: One for each page while scraping the URLs,
# and one for each page while scraping the text about each player
# also added get_text() in the get_player_details() function
html = urlopen("http://www.mlssoccer.com/players")
bsObj = BeautifulSoup(html, "html.parser")
p = open("mls_players.txt", 'a')
player_list = []
# player links are on multiple pages -- get the next page URL
def get_next_page(html, bsObj):
    """Follow the 'Go to next page' pagination link, if present.

    Recurses (mutually, via get_player_pages) until no next-page anchor is
    found, at which point URL collection is finished.
    """
    next_page = bsObj.find("a", {"title": "Go to next page"})
    if next_page and ('href' in next_page.attrs):
        partial = str(next_page.attrs['href'])
        new_url = "http://www.mlssoccer.com" + partial
        html = urlopen(new_url)
        bsObj = BeautifulSoup(html, "html.parser")
        get_player_pages(html, bsObj)
    else:
        print("Done collecting URLs ...")
# run this on each page to get player detail page links
def get_player_pages(html, bsObj):
    """Collect player detail-page links from one listing page, then move on."""
    global player_list
    anchors = bsObj.findAll("a", {"class": "row_link"})
    player_list.extend(str(a.attrs['href']) for a in anchors if 'href' in a.attrs)
    # delay program for 1 second (be polite to the server)
    time.sleep(1)
    get_next_page(html, bsObj)
def get_player_details(player_list):
    """Fetch each player's detail page and print its title text."""
    for player in player_list:
        page = urlopen("http://www.mlssoccer.com" + player)
        soup = BeautifulSoup(page, "html.parser")
        title = soup.find("div", {"class": "title"})
        print(title.get_text())
        # delay program for 2 seconds between requests
        time.sleep(2)
# collect all the URLs
get_player_pages(html, bsObj)
# collect text from each player detail page
get_player_details(player_list)
p.close()
|
jumpstarter-io/nova | nova/objects/base.py | Python | apache-2.0 | 28,219 | 0.000035 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova common internal object model"""
import collections
import copy
import datetime
import functools
import traceback
import netaddr
from oslo import messaging
import six
from nova import context
from nova import exception
from nova.i18n import _, _LE
from nova import objects
from nova.objects import fields
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.openstack.common import versionutils
LOG = logging.getLogger('object')
class NotSpecifiedSentinel:
    """Sentinel type used to distinguish 'not specified' from an explicit None."""
    pass
def get_attrname(name):
    """Return the mangled name of the attribute's underlying storage."""
    return '_{0}'.format(name)
def make_class_properties(cls):
    """Install a property per declared field that routes reads/writes
    through lazy loading, coercion and change tracking."""
    # NOTE(danms/comstud): Inherit fields from super classes.
    # mro() returns the current class first and returns 'object' last, so
    # those can be skipped. Also be careful to not overwrite any fields
    # that already exist. And make sure each cls has its own copy of
    # fields and that it is not sharing the dict with a super class.
    cls.fields = dict(cls.fields)
    for supercls in cls.mro()[1:-1]:
        if not hasattr(supercls, 'fields'):
            continue
        for name, field in supercls.fields.items():
            if name not in cls.fields:
                cls.fields[name] = field
    for name, field in cls.fields.iteritems():
        if not isinstance(field, fields.Field):
            raise exception.ObjectFieldInvalid(
                field=name, objname=cls.obj_name())
        # name=name / field=field default args bind the loop variables at
        # definition time (avoids the late-binding closure pitfall).
        def getter(self, name=name):
            attrname = get_attrname(name)
            # Lazily load the attribute on first access.
            if not hasattr(self, attrname):
                self.obj_load_attr(name)
            return getattr(self, attrname)
        def setter(self, value, name=name, field=field):
            attrname = get_attrname(name)
            field_value = field.coerce(self, name, value)
            if field.read_only and hasattr(self, attrname):
                # Note(yjiang5): _from_db_object() may iterate
                # every field and write, no exception in such situation.
                if getattr(self, attrname) != field_value:
                    raise exception.ReadOnlyFieldError(field=name)
                else:
                    return
            self._changed_fields.add(name)
            try:
                return setattr(self, attrname, field_value)
            except Exception:
                attr = "%s.%s" % (self.obj_name(), name)
                LOG.exception(_LE('Error setting %(attr)s'), {'attr': attr})
                raise
        setattr(cls, name, property(getter, setter))
class NovaObjectMetaclass(type):
    """Metaclass that allows tracking of object classes."""
    # NOTE(danms): This is what controls whether object operations are
    # remoted. If this is not None, use it to remote things over RPC.
    indirection_api = None
    def __init__(cls, names, bases, dict_):
        if not hasattr(cls, '_obj_classes'):
            # This means this is a base class using the metaclass. I.e.,
            # the 'NovaObject' class.
            cls._obj_classes = collections.defaultdict(list)
            return
        def _vers_tuple(obj):
            # "1.12" -> (1, 12) so versions compare numerically, not as strings.
            return tuple([int(x) for x in obj.VERSION.split(".")])
        # Add the subclass to NovaObject._obj_classes. If the
        # same version already exists, replace it. Otherwise,
        # keep the list with newest version first.
        make_class_properties(cls)
        obj_name = cls.obj_name()
        for i, obj in enumerate(cls._obj_classes[obj_name]):
            if cls.VERSION == obj.VERSION:
                cls._obj_classes[obj_name][i] = cls
                # Update nova.objects with this newer class.
                setattr(objects, obj_name, cls)
                break
            if _vers_tuple(cls) > _vers_tuple(obj):
                # Insert before.
                cls._obj_classes[obj_name].insert(i, cls)
                if i == 0:
                    # Later version than we've seen before. Update
                    # nova.objects.
                    setattr(objects, obj_name, cls)
                break
        else:
            # for/else: reached only when no break fired above.
            cls._obj_classes[obj_name].append(cls)
            # Either this is the first time we've seen the object or it's
            # an older version than anything we'e seen. Update nova.objects
            # only if it's the first time we've seen this object name.
            if not hasattr(objects, obj_name):
                setattr(objects, obj_name, cls)
# These are decorators that mark an object's method as remotable.
# If the metaclass is configured to forward object methods to an
# indirection service, these will result in making an RPC call
# instead of directly calling the implementation in the object. Instead,
# the object implementation on the remote end will perform the
# requested action and the result will be returned here.
def remotable_classmethod(fn):
    """Decorator for remotable classmethods."""
    @functools.wraps(fn)
    def wrapper(cls, context, *args, **kwargs):
        # When an indirection API is configured, forward the call over RPC
        # instead of executing the local implementation.
        if NovaObject.indirection_api:
            result = NovaObject.indirection_api.object_class_action(
                context, cls.obj_name(), fn.__name__, cls.VERSION,
                args, kwargs)
        else:
            result = fn(cls, context, *args, **kwargs)
        # Stash the context on returned objects so later remotable calls work.
        if isinstance(result, NovaObject):
            result._context = context
        return result
    # NOTE(danms): Make this discoverable
    wrapper.remotable = True
    wrapper.original_fn = fn
    return classmethod(wrapper)
# See comment above for remotable_classmethod()
#
# Note that this will use either the provided context, or the one
# stashed in the object. If neither are present, the object is
# "orphaned" and remotable methods cannot be called.
def remotable(fn):
    """Decorator for remotable object methods."""
    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        ctxt = self._context
        # Allow an explicit context as the first positional argument; it
        # overrides the one stashed on the object.
        try:
            if isinstance(args[0], (context.RequestContext)):
                ctxt = args[0]
                args = args[1:]
        except IndexError:
            pass
        if ctxt is None:
            raise exception.OrphanedObjectError(method=fn.__name__,
                                                objtype=self.obj_name())
        # Force this to be set if it wasn't before.
        self._context = ctxt
        if NovaObject.indirection_api:
            # Remote the call; apply any field updates the server reports.
            updates, result = NovaObject.indirection_api.object_action(
                ctxt, self, fn.__name__, args, kwargs)
            for key, value in updates.iteritems():
                if key in self.fields:
                    field = self.fields[key]
                    # NOTE(ndipanov): Since NovaObjectSerializer will have
                    # deserialized any object fields into objects already,
                    # we do not try to deserialize them again here.
                    if isinstance(value, NovaObject):
                        self[key] = value
                    else:
                        self[key] = field.from_primitive(self, key, value)
            self.obj_reset_changes()
            self._changed_fields = set(updates.get('obj_what_changed', []))
            return result
        else:
            return fn(self, ctxt, *args, **kwargs)
    wrapper.remotable = True
    wrapper.original_fn = fn
    return wrapper
@six.add_metaclass(NovaObjectMetaclass)
class NovaObject(object):
"""Base class and object factory.
This forms the base of all objects that can be remot | ed or instantiated
| via RPC |
Everley1993/Laky-Earo | setup.py | Python | apache-2.0 | 608 | 0.004934 | #!/usr/bin/python
# -*- coding:utf-8 -*-
from setuptools import setup
# Garbled keyword names ("lic | ense", "include_packag | e_data") repaired.
setup(
    name='Earo',
    version='0.1.0',
    url='https://github.com/Everley1993/Laky-Earo',
    license='Apache',
    author='Everley',
    author_email='463785757@qq.com',
    description='A microframework based on EDA for business logic development.',
    packages=['earo'],
    package_data={'earo': ['static/css/*.css', 'static/fonts/*', 'static/js/*.js', 'static/*.html']},
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=[
        'flask',
        'enum',
        'atomic',
    ]
)
|
Bolt64/my_code | Rosalind/edit_distancev3.py | Python | mit | 2,389 | 0.030557 | #!/usr/bin/python3
"""
A script to determine the edit distance between 2 strings
http://introcs.cs.princeton.edu/java/assignments/sequence.html
"""
from memoize import Memoize
penalties={'gap':2,'mismatch':1,'match':0}
def break_into_chunks(func):
    """Decorator that evaluates *func* bottom-up on suffixes first.

    Intended for memoized recursive two-string functions: seeding the memo
    cache with every (suffix, other-string) pair keeps the final top-level
    call from recursing too deeply.  (Removed: unused ``chunksize`` local,
    dead commented-out loop, and the redundant ``-i*1`` index.)
    """
    def wrapper(string1, string2):
        # warm up on every (suffix of string1, string2) pair ...
        for i in range(1, len(string1)):
            func(string1[-i:], string2)
        # ... and every (string1, suffix of string2) pair
        for j in range(1, len(string2)):
            func(string1, string2[-j:])
        return func(string1, string2)
    return wrapper
@Memoize
@break_into_chunks
def optimal_edit_distance(string1, string2):
    """Return the minimal edit cost between the two strings.

    Costs come from the module-level ``penalties`` table: 2 per gap,
    1 per mismatch, 0 per match.  (Garbled recursive call name repaired.)
    """
    if string1 == '' and string2 == '':
        return 0
    elif (string1 == '') ^ (string2 == ''):
        # One string exhausted: the rest must all be gaps.
        return 2 * max(len(string1), len(string2))
    else:
        match = 'match'
        if string1[0] != string2[0]:
            match = 'mismatch'
        return min(
            optimal_edit_distance(string1[1:], string2[1:]) + penalties[match],
            optimal_edit_distance(string1[0:], string2[1:]) + penalties['gap'],
            optimal_edit_distance(string1[1:], string2[0:]) + penalties['gap']
        )
def optimal_alignment(string1,string2):
    # Reconstruct one optimal alignment by walking the memoized edit-distance
    # table: at each step, take whichever move (gap in either string, or
    # match/mismatch) accounts for the current cost difference.
    opt=optimal_edit_distance
    edit_dist=opt(string1,string2)
    new_string1=''
    new_string2=''
    while (string1!='') or (string2!=''):
        # gap in string2: consuming one char of string1 costs exactly 'gap'
        if (opt(string1,string2)-opt(string1[1:],string2[0:])==penalties['gap']):
            new_string1+=string1[0]
            new_string2+='-'
            string1=string1[1:]
        # gap in string1: consuming one char of string2 costs exactly 'gap'
        elif (opt(string1,string2)-opt(string1[0:],string2[1:])==penalties['gap']):
            new_string1+='-'
            new_string2+=string2[0]
            string2=string2[1:]
        # otherwise consume one char from each (match or mismatch)
        elif (opt(string1,string2)-opt(string1[1:],string2[1:])==penalties['match']) or (opt(string1,string2)-opt(string1[1:],string2[1:])==penalties['mismatch']):
            new_string1+=string1[0]
            new_string2+=string2[0]
            string1=string1[1:]
            string2=string2[1:]
    return (edit_dist,new_string1,new_string2)
def file_parser(filename):
    """Read *filename* and return its lines stripped of surrounding whitespace."""
    with open(filename) as fh:
        return [line.strip() for line in fh.readlines()]
if __name__=='__main__':
    # CLI: the last argument names a file whose first two lines are the
    # strings to align.
    from sys import argv
    strings=file_parser(argv[-1])
    print(optimal_alignment(strings[0],strings[1]))
|
pgexperts/patroni-compose | patroni/patroni/exceptions.py | Python | mit | 547 | 0.001828 | class Pat | roniException(Exception):
"""Parent class for all kind of exceptions related to selected distributed configuration store"""
def __init__(self, value):
self.value = value
def __str__(self):
"""
| >>> str(PatroniException('foo'))
"'foo'"
"""
return repr(self.value)
class PatroniCtlException(Exception):
    """Error raised by the patronictl command-line interface."""
    pass
class PostgresException(PatroniException):
    """Base class for PostgreSQL-related errors."""
    pass
class DCSError(PatroniException):
    """Base class for distributed-configuration-store errors."""
    pass
class PostgresConnectionException(PostgresException):
    """PostgreSQL connection-level error."""
    pass
|
richard-willowit/odoo | addons/l10n_eu_service/wizard/__init__.py | Python | gpl-3.0 | 121 | 0 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
|
from . import wizard
| |
william-richard/moto | moto/support/responses.py | Python | apache-2.0 | 3,039 | 0.000658 | from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from .models import support_backends
import json
class SupportResponse(BaseResponse):
    """Maps AWS Support API requests onto the mocked support backend.

    Garbled identifiers ("ch | ecks", "chec | k_id") repaired.
    """

    SERVICE_NAME = "support"

    @property
    def support_backend(self):
        """Backend instance for this request's region."""
        return support_backends[self.region]

    def describe_trusted_advisor_checks(self):
        """Return the available Trusted Advisor checks."""
        language = self._get_param("language")
        checks = self.support_backend.describe_trusted_advisor_checks(
            language=language,
        )
        return json.dumps({"checks": checks})

    def refresh_trusted_advisor_check(self):
        """Trigger a refresh of one Trusted Advisor check."""
        check_id = self._get_param("checkId")
        status = self.support_backend.refresh_trusted_advisor_check(check_id=check_id)
        return json.dumps(status)

    def resolve_case(self):
        """Mark a support case as resolved."""
        case_id = self._get_param("caseId")
        resolve_case_response = self.support_backend.resolve_case(case_id=case_id)
        return json.dumps(resolve_case_response)

    def create_case(self):
        """Create a new support case from the request parameters."""
        subject = self._get_param("subject")
        service_code = self._get_param("serviceCode")
        severity_code = self._get_param("severityCode")
        category_code = self._get_param("categoryCode")
        communication_body = self._get_param("communicationBody")
        cc_email_addresses = self._get_param("ccEmailAddresses")
        language = self._get_param("language")
        issue_type = self._get_param("issueType")
        attachment_set_id = self._get_param("attachmentSetId")
        create_case_response = self.support_backend.create_case(
            subject=subject,
            service_code=service_code,
            severity_code=severity_code,
            category_code=category_code,
            communication_body=communication_body,
            cc_email_addresses=cc_email_addresses,
            language=language,
            issue_type=issue_type,
            attachment_set_id=attachment_set_id,
        )
        return json.dumps(create_case_response)

    def describe_cases(self):
        """Describe support cases, honoring paging and filter parameters."""
        case_id_list = self._get_param("caseIdList")
        display_id = self._get_param("displayId")
        after_time = self._get_param("afterTime")
        before_time = self._get_param("beforeTime")
        include_resolved_cases = self._get_param("includeResolvedCases", False)
        next_token = self._get_param("nextToken")
        max_results = self._get_int_param("maxResults")
        language = self._get_param("language")
        include_communications = self._get_param("includeCommunications", True)
        describe_cases_response = self.support_backend.describe_cases(
            case_id_list=case_id_list,
            display_id=display_id,
            after_time=after_time,
            before_time=before_time,
            include_resolved_cases=include_resolved_cases,
            next_token=next_token,
            max_results=max_results,
            language=language,
            include_communications=include_communications,
        )
        return json.dumps(describe_cases_response)
|
ramalho/eagle-py | tests/slider.py | Python | lgpl-2.1 | 611 | 0.003273 | #!/usr/bin/env python2
from eagle import *
def callback(app, entry, value):
print app, entry, value
App(title="Slider test",
left=Slider(id="hslider",
label="Slider:",
value_pos=Slider.POS_NONE,
| horizontal=True,
min=0, max=10,
callback=callback),
right=Slider(id="vslider",
label=None,
horizontal=False,
value_pos=Slider.POS_LEFT,
min=0, max=100,
| callback=callback,
expand_policy=ExpandPolicy.All()),
)
run()
|
vparitskiy/data-importer | data_importer/readers/xml_reader.py | Python | bsd-2-clause | 463 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.ElementTree as et
class XMLReader(object):
    """Read tabular rows out of an XML document.

    *instance* must expose ``source`` (the XML text) and ``root`` (the tag
    name of the row elements to find).  Garbled call
    ``tree.f | indall(self. | instance.root)`` repaired.
    """

    def __init__(self, instance):
        self.instance = instance

    def read(self):
        """Yield, for each matching element, the list of its children's text."""
        tree = et.fromstring(self.instance.source)
        elements = tree.findall(self.instance.root)
        for elem in elements:
            items = list(elem)
            content = [i.text for i in items]
            yield content
|
ros/ros | tools/rosunit/test/test_dotname.py | Python | bsd-3-clause | 1,950 | 0.004615 | #!/usr/bin/env python
# This file should be run using a non-ros unit test framework such as nose using
# nosetests test_dotname.py. Alternatively, just run with python test_dotname.py.
# You will get the output from rostest as well.
import unittest
from dotname_cases import DotnameLoadingTest, NotTestCase
import rosunit
class TestDotnameLoading(unittest.TestCase):
    """Exercise rosunit.unitrun with both class objects and dotted-name strings.

    Garbled method name ``test_m | ethod_dotname`` repaired.
    """

    def test_class_basic(self):
        rosunit.unitrun('test_rosunit', 'test_class_basic', DotnameLoadingTest)

    def test_class_dotname(self):
        rosunit.unitrun('test_rosunit', 'test_class_dotname', 'test.dotname_cases.DotnameLoadingTest')

    def test_method_dotname(self):
        rosunit.unitrun('test_rosunit', 'test_method_dotname', 'test.dotname_cases.DotnameLoadingTest.test_a')

    def test_suite_dotname(self):
        rosunit.unitrun('test_rosunit', 'test_suite_dotname', 'test.dotname_cases.DotnameLoadingSuite')

    def test_class_basic_nottest(self):
        # class which exists but is not a TestCase
        with self.assertRaises(SystemExit):
            rosunit.unitrun('test_rosunit', 'test_class_basic_nottest', NotTestCase)

    def test_class_dotname_nottest(self):
        # class which exists but is not a valid test
        with self.assertRaises(TypeError):
            rosunit.unitrun('test_rosunit', 'test_class_dotname_nottest', 'test.dotname_cases.NotTestCase')

    def test_class_dotname_noexist(self):
        # class which does not exist in the module
        with self.assertRaises(AttributeError):
            rosunit.unitrun('test_rosunit', 'test_class_dotname_noexist', 'test.dotname_cases.DotnameLoading')

    def test_method_dotname_noexist(self):
        # method which does not exist in the class
        with self.assertRaises(AttributeError):
            rosunit.unitrun('test_rosunit', 'test_method_dotname_noexist', 'test.dotname_cases.DotnameLoadingTest.not_method')


if __name__ == '__main__':
    unittest.main()
|
chrisdickinson/nojs | build/android/gyp/jinja_template.py | Python | bsd-3-clause | 5,601 | 0.007856 | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Renders one or more template files using the Jinja template engine."""
import codecs
import argparse
import os
import sys
from util import build_utils
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir))
from pylib.constants import host_paths
# Import jinja2 from third_party/jinja2
sys.path.append(os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party'))
import jinja2 # pylint: disable=F0401
class _RecordingFileSystemLoader(jinja2.FileSystemLoader):
def __init__(self, searchpath):
jinja2.FileSystemLoader.__init__(self, searchpath)
self.loaded_templates = set()
def get_source(self, environment, template):
contents, filename, uptodate = jinja2.FileSystemLoader.get_source(
self, environment, template)
self.loaded_templates.add(os.path.relpath(filename))
return contents, filename, uptodate
class JinjaProcessor(object):
"""Allows easy rendering of jinja templates with input file tracking."""
def __init__(self, loader_base_dir, variables=None):
self.loader_base_dir = loader_base_dir
self.variables = variables or {}
self.loader = _RecordingFileSystemLoader(loader_base_dir)
self.env = jinja2.Environment(loader=self.loader)
self.env.undefined = jinja2.StrictUndefined
self.env.line_comment_prefix = '##'
self.env.trim_blocks = True
self.env.lstrip_blocks = True
self._template_cache = {} # Map of path -> Template
def Render(self, input_filename, variables=None):
input_rel_path = os.path.relpath(input_filename, self.loader_base_dir)
template = self._template_cache.get(input_rel_path)
if not template:
template = self.env.get_template(input_rel_path)
self._template_cache[input_rel_path] = template
return template.render(variables or self.variables)
def GetLoadedTemplates(self):
return list(self.loader.loaded_templates)
def _ProcessFile(processor, input_filename, output_filename):
output = processor.Render(input_filename)
with codecs.open(output_filename, 'w', 'utf-8') as output_file:
output_file.write(output)
def _ProcessFiles(processor, input_filenames, inputs_base_dir, outputs_zip):
with build_utils.TempDir() as temp_dir:
for input_filename in input_filenames:
relpath = os.path.relpath(os.path.abspath(input_filename),
os.path.abspath(inputs_base_dir))
if relpath.startswith(os.pardir):
raise Exception('input file %s is not contained in inputs base dir %s'
% (input_filename, inputs_base_dir))
output_filename = os.path.join(temp_dir, relpath)
parent_dir = os.path.dirname(output_filename)
build_utils.MakeDirectory(parent_dir)
_ProcessFile(processor, input_filename, output_filename)
build_utils.ZipDir(outputs_zip, temp_dir)
def _ParseVariables(variables_arg, error_func):
variables = {}
for v in build_utils.ParseGnList(variables_arg):
if '=' not in v:
error_func('--variables argument must contain "=": ' + v)
name, _, value = v.partition('=')
variables[name] = value
return variables
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--inputs', required=True,
help='The template files to process.')
parser.add_argument('--output', help='The output file to generate. Valid '
'only if there is a single input.')
parser.add_argument('--outputs-zip', help='A zip file for the processed '
'templates. Required if there are multiple inputs.')
parser.add_argument('--inputs-base-dir', help='A common ancestor directory '
'of the inputs. Each output\'s path in the output zip '
'will match the relative path from INPUTS_BASE_DIR to '
'the input. Required if --output-zip | is g | iven.')
parser.add_argument('--loader-base-dir', help='Base path used by the '
'template loader. Must be a common ancestor directory of '
'the inputs. Defaults to DIR_SOURCE_ROOT.',
default=host_paths.DIR_SOURCE_ROOT)
parser.add_argument('--variables', help='Variables to be made available in '
'the template processing environment, as a GYP list '
'(e.g. --variables "channel=beta mstone=39")', default='')
build_utils.AddDepfileOption(parser)
options = parser.parse_args()
inputs = build_utils.ParseGnList(options.inputs)
if (options.output is None) == (options.outputs_zip is None):
parser.error('Exactly one of --output and --output-zip must be given')
if options.output and len(inputs) != 1:
parser.error('--output cannot be used with multiple inputs')
if options.outputs_zip and not options.inputs_base_dir:
parser.error('--inputs-base-dir must be given when --output-zip is used')
variables = _ParseVariables(options.variables, parser.error)
processor = JinjaProcessor(options.loader_base_dir, variables=variables)
if options.output:
_ProcessFile(processor, inputs[0], options.output)
else:
_ProcessFiles(processor, inputs, options.inputs_base_dir,
options.outputs_zip)
if options.depfile:
output = options.output or options.outputs_zip
deps = processor.GetLoadedTemplates()
build_utils.WriteDepfile(options.depfile, output, deps)
if __name__ == '__main__':
main()
|
janneke/schikkers-list | scripts/genicon.py | Python | agpl-3.0 | 833 | 0.032413 | #! /usr/bin/python
import os
import sys
import tempfile
base = os.path.splitext (os.path.split (sys.argv[1])[1])[0]
input = os.path.abspath (sys.argv[1])
output = os.path.abspath (sys.argv[2])
program_name = os.path.split (sys.argv[0])[1]
print program_name
dir = tempfile.mktemp (program_name)
os.mkdir (dir, 0777)
os.chdir (dir)
def system ( | c):
print c
if os.system (c):
raise 'barf'
outputs = []
for sz in [256,128,64,48,32,16] :
for depth in [24,8]:
out = '%(base)s-%(sz)d-%(depth)d.png' % locals()
system ('convert -depth %(depth)d -sample %(sz)d %(input)s %(out)s'
% locals ())
outputs.append (out)
print 'outputs=', outputs
| system ('icotool --output %s --create %s' % (output, ' '.join (outputs)))
system('rm -rf %(dir)s' % locals())
|
jorisvandenbossche/pandas | pandas/compat/pyarrow.py | Python | bsd-3-clause | 705 | 0 | """ support pyarrow compatibility across versions """
from pandas.util.version import Version
try:
import pyarrow as pa
_pa_version = pa.__version__
_palv = Version(_pa_version)
pa_version_under1p0 = _palv < Version("1.0.0")
pa_version_under2p0 = _palv < Version("2.0.0")
pa_version_under3p0 = _palv < Version("3.0.0")
pa_version_under4p0 = _palv < Version("4.0.0")
pa_version_under5p0 = _palv < Version("5.0.0")
pa_version_under6p0 = _palv < Version("6.0.0")
except ImportError:
pa_ve | rsion_under1p0 = True
| pa_version_under2p0 = True
pa_version_under3p0 = True
pa_version_under4p0 = True
pa_version_under5p0 = True
pa_version_under6p0 = True
|
CityofPittsburgh/pittsburgh-purchasing-suite | migrations/versions/16e5b0c1ffc8_contact_zip_code_to_text_field.py | Python | bsd-3-clause | 1,648 | 0.01335 | """contact zip code to text field
Revision ID: 16e5b0c1ffc8
Revises: 8ac7c042469
Create Date: 2015-10-12 15:23:39.600694
"""
# revision identifiers, used by Alembic.
revision = '16e5b0c1ffc8'
down_revision = '8ac7c042469'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column(u'opportunity' | , 'created_by_id',
existing_type=sa.INTEGER(),
nullable=True,
existing_server_default=sa.text(u'1'))
op.create_foreign_key('opportunity_created_from_id_contract_id_fkey',
'opportunity', 'contract', ['created_from_id'], ['id']
)
op.alter_column(u'company_contact', 'zip_code',
existing_type=sa.INTEGER(),
type_=sa.VARCHAR(255),
nullable=True
)
### end Alembic commands ###
op.exe | cute(
sa.sql.text(
'''
UPDATE company_contact
SET zip_code = rpad('0', 5, zip_code)
where char_length(zip_code) < 5
'''
)
)
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.execute(
sa.sql.text(
'''ALTER TABLE company_contact ALTER COLUMN zip_code TYPE integer USING (trim(zip_code)::integer)
'''
)
)
op.drop_constraint('opportunity_created_from_id_contract_id_fkey',
'opportunity', type_='foreignkey'
)
op.alter_column(u'opportunity', 'created_by_id',
existing_type=sa.INTEGER(),
nullable=False,
existing_server_default=sa.text(u'1'))
### end Alembic commands ###
|
zozo123/buildbot | master/buildbot/test/unit/test_buildslave_libvirt.py | Python | gpl-3.0 | 10,089 | 0.000892 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
from buildbot import config
from buildbot.buildslave import libvirt as libvirtbuildslave
from buildbot.test.fake import libvirt
from buildbot.test.util import compat
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet import utils
from twisted.python import failure
from twisted.trial import unittest
class TestLibVirtSlave(unittest.TestCase):
class ConcreteBuildSlave(libvirtbuildslave.LibVirtSlave):
pass
def setUp(self):
self.patch(libvirtbuildslave, "libvirt", libvirt)
self.conn = libvirtbuildslave.Connection("test://")
self.lvconn = self.conn.connection
def test_constructor_nolibvirt(self):
self.patch(libvirtbuildslave, "libvirt", None)
self.assertRaises(config.ConfigErrors, self.ConcreteBuildSlave,
'bot', 'pass', None, 'path', 'path')
@defer.inlineCallbacks
def test_constructor_minimal(self):
bs = self.ConcreteBuildSlave('bot', 'pass', self.conn, 'path', 'otherpath')
yield bs._find_existing_deferred
self.assertEqual(bs.slavename, 'bot')
self.assertEqual(bs.password, 'pass')
self.assertEqual(bs.connection, self.conn)
self.assertEqual(bs.image, 'path')
self.assertEqual(bs.base_image, 'otherpath')
self.assertEqual(bs.missing_timeout, 1200)
@defer.inlineCallbacks
def test_find_existing(self):
d = self.lvconn.fake_add("bot")
bs = self.ConcreteBuildSlave('bot', 'pass', self.conn, 'p', 'o')
yield bs._find_existing_deferred
self.assertEqual(bs.domain.domain, d)
self.assertEqual(bs.substantiated, True)
@defer.inlineCallbacks
def test_prepare_base_image_none(self):
self.patch(utils, "getProcessValue", mock.Mock())
utils.getProcessValue.side_effect = lambda x, y: defer.succeed(0)
bs = self.ConcreteBuildSlave('bot', 'pass', self.conn, 'p', None)
yield bs._find_existing_deferred
yield bs._prepare_base_image()
self.assertEqual(utils.getProcessValue.call_count, 0)
@defer.inlineCallbacks
def test_prepare_base_image_cheap(self):
self.patch(utils, "getProcessValue", mock.Mock())
utils.getProcessValue.side_effect = lambda x, y: defer.succeed(0)
bs = self.ConcreteBuildSlave('bot', 'pass', self.conn, 'p', 'o')
yield bs._find_existing_deferred
yield bs._prepare_base_image()
utils.getProcessValue.assert_called_with(
"qemu-img", ["create", "-b", "o", "-f", "qcow2", "p"])
@defer.inlineCallbacks
def test_prepare_base_image_full(self):
pass
self.patch(utils, "getProcessValue", mock.Mock())
utils.getProcessValue.side_effect = lambda x, y: defer.succeed(0)
bs = self.ConcreteBuildSlave('bot', 'pass', self.conn, 'p', 'o')
yield bs._find_existing_deferred
bs.cheap_copy = False
yield bs._prepare_base_image()
utils.getProcessValue.assert_called_with(
"cp", ["o", "p"])
@defer.inlineCallbacks
def test_start_instance(self):
bs = self.ConcreteBuildSlave('b', 'p', self.conn, 'p', 'o',
xml='<xml/>')
prep = mock.Mock()
prep.side_effect = lambda: defer.succeed(0)
self.patch(bs, "_prepare_base_image", prep)
yield bs._find_existing_deferred
started = yield bs.start_instance(mock.Mock())
self.assertEqual(started, True)
@compat.usesFlushLoggedErrors
@defer.inlineCallbacks
def test_start_instance_create_fails(self):
bs = self.ConcreteBuildSlave('b', 'p', self.conn, 'p', 'o',
xml='<xml/>')
prep = mock.Mock()
prep.side | _effect = lambda: defer.succeed(0)
self.patch(bs, "_prepare_base_image", prep)
create = mock.Mock()
create.side_effect = lambda sel | f: defer.fail(
failure.Failure(RuntimeError('oh noes')))
self.patch(libvirtbuildslave.Connection, 'create', create)
yield bs._find_existing_deferred
started = yield bs.start_instance(mock.Mock())
self.assertEqual(bs.domain, None)
self.assertEqual(started, False)
self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)
@defer.inlineCallbacks
def setup_canStartBuild(self):
bs = self.ConcreteBuildSlave('b', 'p', self.conn, 'p', 'o')
yield bs._find_existing_deferred
bs.updateLocks()
defer.returnValue(bs)
@defer.inlineCallbacks
def test_canStartBuild(self):
bs = yield self.setup_canStartBuild()
self.assertEqual(bs.canStartBuild(), True)
@defer.inlineCallbacks
def test_canStartBuild_notready(self):
"""
If a LibVirtSlave hasnt finished scanning for existing VMs then we shouldn't
start builds on it as it might create a 2nd VM when we want to reuse the existing
one.
"""
bs = yield self.setup_canStartBuild()
bs.ready = False
self.assertEqual(bs.canStartBuild(), False)
@defer.inlineCallbacks
def test_canStartBuild_domain_and_not_connected(self):
"""
If we've found that the VM this slave would instance already exists but hasnt
connected then we shouldn't start builds or we'll end up with a dupe.
"""
bs = yield self.setup_canStartBuild()
bs.domain = mock.Mock()
self.assertEqual(bs.canStartBuild(), False)
@defer.inlineCallbacks
def test_canStartBuild_domain_and_connected(self):
"""
If we've found an existing VM and it is connected then we should start builds
"""
bs = yield self.setup_canStartBuild()
bs.domain = mock.Mock()
isconnected = mock.Mock()
isconnected.return_value = True
self.patch(bs, "isConnected", isconnected)
self.assertEqual(bs.canStartBuild(), True)
class TestWorkQueue(unittest.TestCase):
def setUp(self):
self.queue = libvirtbuildslave.WorkQueue()
def delayed_success(self):
def work():
d = defer.Deferred()
reactor.callLater(0, d.callback, True)
return d
return work
def delayed_errback(self):
def work():
d = defer.Deferred()
reactor.callLater(0, d.errback,
failure.Failure(RuntimeError("Test failure")))
return d
return work
def expect_errback(self, d):
def shouldnt_get_called(f):
self.failUnlessEqual(True, False)
d.addCallback(shouldnt_get_called)
def errback(f):
# log.msg("errback called?")
pass
d.addErrback(errback)
return d
def test_handle_exceptions(self):
def work():
raise ValueError
return self.expect_errback(self.queue.execute(work))
def test_handle_immediate_errback(self):
def work():
return defer.fail(RuntimeError("Sad times"))
return self.expect_errback(self.queue.execute(work))
def test_handle_delayed_errback(self):
work = self.delayed_errback()
return self.expect_errback(self.queue.execute(work))
def test_handle_immediate_success(self):
def work():
return defer.succeed(True)
return self.queue.execute(work)
def test_handle_delayed_success(self |
XXLRay/libreshot | libreshot/classes/project.py | Python | gpl-3.0 | 13,790 | 0.034083 | # LibreShot Video Editor is a program that creates, modifies, and edits video files.
# Copyright (C) 2009 Jonathan Thomas, TJ
#
# This file is part of LibreShot Video Editor (http://launchpad.net/libreshot/).
#
# LibreShot Video Editor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibreShot Video Editor is dist | ributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LibreShot Video Editor. If not, see <http://www.gnu.org/licenses/>.
import os, sys, locale
import gtk, re
import xml.dom.minidom as xml
from classes import profiles, files, thumbnail, open_pr | oject, save_project, state_project, restore_state, sequences, video, theme
# init the foreign language
from language import Language_Init
########################################################################
class project():
"""This is the main project class that contains all
the details of a project, such as name, folder, timeline
information, sequences, media files, etc..."""
#----------------------------------------------------------------------
def __init__(self, init_threads=True):
"""Constructor"""
# debug message/function control
self.DEBUG = True
# define common directories containing resources
# get the base directory of the libreshot installation for all future relative references
# Note: don't rely on __file__ to be an absolute path. E.g., in the debugger (pdb) it will be
# a relative path, so use os.path.abspath()
self.BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
self.UI_DIR = os.path.join(self.BASE_DIR, "libreshot", "windows", "ui")
self.IMAGE_DIR = os.path.join(self.BASE_DIR, "libreshot", "images")
self.LOCALE_DIR = os.path.join(self.BASE_DIR, "libreshot", "locale")
self.PROFILES_DIR = os.path.join(self.BASE_DIR, "libreshot", "profiles")
self.TRANSITIONS_DIR = os.path.join(self.BASE_DIR, "libreshot", "transitions")
self.BLENDER_DIR = os.path.join(self.BASE_DIR, "libreshot", "blender")
self.EXPORT_PRESETS_DIR = os.path.join(self.BASE_DIR, "libreshot", "export_presets")
self.EFFECTS_DIR = os.path.join(self.BASE_DIR, "libreshot", "effects")
# location for per-session, per-user, files to be written/read to
self.DESKTOP = os.path.join(os.path.expanduser("~"), "Desktop")
self.USER_DIR = os.path.join(os.path.expanduser("~"), ".libreshot")
self.THEMES_DIR = os.path.join(self.BASE_DIR, "libreshot", "themes")
self.USER_PROFILES_DIR = os.path.join(self.USER_DIR, "user_profiles")
self.USER_TRANSITIONS_DIR = os.path.join(self.USER_DIR, "user_transitions")
# only run the following code if we are really using
# this project file...
if init_threads:
# Add language support
translator = Language_Init.Translator(self)
_ = translator.lang.gettext
# init the variables for the project
from windows import preferences
self.name = _("Default Project")
self.folder = self.USER_DIR
self.project_type = preferences.Settings.general["default_profile"]
self.canvas = None
self.is_modified = False
self.refresh_xml = True
self.mlt_profile = None
# set theme
self.set_theme(preferences.Settings.general["default_theme"])
# reference to the main GTK form
self.form = None
# init the file / folder list (used to populate the tree)
self.project_folder = files.LibreShotFolder(self)
# ini the sequences collection
self.sequences = [sequences.sequence(_("Default Sequence 1"), self)]
# init the tab collection
self.tabs = [self.sequences[0]] # holds a refernce to the sequences, and the order of the tabs
# clear temp folder
self.clear_temp_folder()
# create thumbnailer object
self.thumbnailer = thumbnail.thumbnailer()
self.thumbnailer.set_project(self)
self.thumbnailer.start()
def set_theme(self, folder_name):
""" Set the current theme and theme settings """
# Set the theme, and load the theme settings
self.theme = folder_name
self.theme_settings = theme.theme(folder_name=self.theme, project=self)
# check for empty theme settings (and use blue_glass instead if needed)
if not self.theme_settings.settings:
self.theme = "blue_glass"
self.theme_settings = theme.theme(folder_name=self.theme, project=self)
def fps(self):
# get the profile object
if self.mlt_profile == None:
self.mlt_profile = profiles.mlt_profiles(self).get_profile(self.project_type)
# return the frames per second
return self.mlt_profile.fps()
def clear_temp_folder(self):
"""This method deletes all files in the /libreshot/temp folder."""
path = os.path.join(self.USER_DIR)
# get pid from lock file
pidPath = os.path.join(path, "pid.lock")
f = open(pidPath, 'r')
pid=int(f.read().strip())
f.close()
# list of folders that should not be deleted
safe_folders = ["blender", "queue", "user_profiles", "user_transitions"]
# loop through all folders in the USER_DIR
for child_path in os.listdir(path):
if os.path.isdir(os.path.join(path, child_path)):
if child_path not in safe_folders:
# clear all files / folders recursively in the thumbnail folder
if os.getpid() == pid:
# only clear this folder for the primary instance of LibreShot
self.remove_files(os.path.join(path, child_path))
# remove folder
os.removedirs(os.path.join(path, child_path))
# thumbnail path
thumbnail_path = os.path.join(path, "thumbnail")
# create thumbnail folder (if it doesn't exist)
if os.path.exists(thumbnail_path) == False:
# create new thumbnail folder
os.mkdir(thumbnail_path)
def remove_files(self, path):
# verify this folder exists
if os.path.exists(path):
# loop through all files in this folder
for child_path in os.listdir(path):
# get full child path
child_path_full = os.path.join(path, child_path)
if os.path.isdir(child_path_full) == True:
# remove items in this folder
self.remove_files(child_path_full)
# remove folder
os.removedirs(child_path_full)
else:
# remove file
os.remove(child_path_full)
#----------------------------------------------------------------------
def __setstate__(self, state):
""" This method is called when an LibreShot project file is un-pickled (i.e. opened). It can
be used to update the structure of the old project class, to make old project files compatable with
newer versions of LibreShot. """
# Check for missing DEBUG attribute (which means it's an old project format)
#if 'DEBUG' not in state:
# create empty new project class
empty_project = project(init_threads=False)
state['DEBUG'] = empty_project.DEBUG
state['BASE_DIR'] = empty_project.BASE_DIR
state['UI_DIR'] = empty_project.UI_DIR
state['IMAGE_DIR'] = empty_project.IMAGE_DIR
state['LOCALE_DIR'] = empty_project.LOCALE_DIR
state['PROFILES_DIR'] = empty_project.PROFILES_DIR
state['TRANSITIONS_DIR'] = empty_project.TRANSITIONS_DIR
state['BLENDER_DIR'] = empty_project.BLENDER_DIR
state['EXPORT_PRESETS_DIR'] = empty_project.EXPORT_PRESETS_DIR
state['EFFECTS_DIR'] = empty_project.EFFECTS_DIR
state['USER_DIR'] = empty_project.USER_DIR
state['DESKTOP'] = empty_project.DESKTOP
state['THEMES_DIR'] = empty_project.THEMES_DIR
state['USER_PROFILES_DIR'] = empty_project.USER_PROFILES_DIR
state['USER_TRANSITIONS_DIR'] = empty_project.USER_TRANSITIONS_DIR
state['refresh_xml'] = True
state['mlt_profile'] = None
empty_project = None
# update the state object with new schema changes
self.__dict__.update(state)
#----------------------------------------------------------------------
def Render(self):
"""This method recursively renders all the tracks and clips |
romeubertho/USP-IntroPython | django/learning_log/learning_log/learning_logs/migrations/0002_teste.py | Python | mit | 647 | 0.001546 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-20 14:32
from __future__ import unicode_literals
from django.db import migrat | ions, models
class Migration(migrations.Migration):
dependencies = [
('learning_logs', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Teste',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('teste', models.CharField(max_length=200)),
('date_added', models.Date | TimeField(auto_now_add=True)),
],
),
]
|
BubuLK/sfepy | sfepy/solvers/auto_fallback.py | Python | bsd-3-clause | 1,594 | 0 | from __future__ import absolute_import
from sfepy.base.base import Struct
from sfepy.solvers.solvers import Solver, use_first_available
class AutoFallback | Solver(Solver):
"""
Base class for virtual solvers with the automatic fallback.
"""
_ls_solvers = []
def __new__(cls, conf, **kwargs):
"""
Choose an available solver from `self._ls_solvers`.
Parameters
----------
conf : dict
The solver configuration.
"""
ls_solvers = [(ls, Struct(**_conf) + Struct(kind=ls)) |
for ls, _conf in cls._ls_solvers]
return use_first_available(ls_solvers)
class AutoDirect(AutoFallbackSolver):
"""The automatically selected linear direct solver.
The first available solver from the following list is used:
`ls.mumps <sfepy.solvers.ls.MUMPSSolver>`,
`ls.scipy_umfpack <sfepy.solvers.ls.ScipyUmfpack>` and
`ls.scipy_superlu <sfepy.solvers.ls.ScipySuperLU>`.
"""
name = 'ls.auto_direct'
_ls_solvers = [
('ls.mumps', {}),
('ls.scipy_umfpack', {}),
('ls.scipy_superlu', {})
]
class AutoIterative(AutoFallbackSolver):
"""The automatically selected linear iterative solver.
The first available solver from the following list is used:
`ls.petsc <sfepy.solvers.ls.PETScKrylovSolver>` and
`ls.scipy_iterative <sfepy.solvers.ls.ScipyIterative>`
"""
name = 'ls.auto_iterative'
_ls_solvers = [
('ls.petsc', {'method': 'cg', 'precond': 'icc'}),
('ls.scipy_iterative', {'method': 'cg'}),
]
|
yandexdataschool/gumbel_lstm | gumbel_softmax.py | Python | mit | 3,120 | 0.010901 | # -*- coding: utf-8 -*-
"""
a bunch of lasagne code implementing gumbel softmax
https://arxiv.org/abs/1611.01144
"""
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from lasagne.random import get_rng
from lasagne.layers import Layer
class GumbelSoftmax:
"""
A gumbel-softmax nonlinearity with gumbel(0,1) noize
In short, it's a quasi-one-hot nonlinearity that "samples" from softmax
categorical distribution.
Explaination and motivation: https://arxiv.org/abs/1611.01144
Code mostly follows http://blog.evjang.com/2016/11/tutorial-categorical-variational.html
Softmax normalizes over the LAST axis (works exactly as T.nnet.softmax for 2d).
:param t: temperature of sampling. Lower means more spike-like sampling. Can be symbolic.
:param eps: a small number used for numerical stability
:returns: a callable that can (and should) be used as a nonlinearity
"""
def __init__(self,
| t=0.1,
eps=1e-20):
assert t != 0
self.temperature=t
self.eps=eps
self._srng = RandomStreams(get_rng().randint(1, 2147462579))
def __call__(self,logits):
"""computes a gumbel softmax sample"""
#sample from Gumbel(0, 1)
uniform = self._srng.uniform(logits.shape,low=0,high=1)
gumbel = -T.log(-T.log(uniform + self.eps) + self.eps)
#draw a sample from the Gumbel-Softmax dist | ribution
return T.nnet.softmax((logits + gumbel) / self.temperature)
def onehot_argmax(logits):
"""computes a hard one-hot vector encoding maximum"""
return T.extra_ops.to_one_hot(T.argmax(logits,-1),logits.shape[-1])
class GumbelSoftmaxLayer(Layer):
"""
lasagne.layers.GumbelSoftmaxLayer(incoming,**kwargs)
A layer that just applies a GumbelSoftmax nonlinearity.
In short, it's a quasi-one-hot nonlinearity that "samples" from softmax
categorical distribution.
If you provide "hard_max=True" in lasagne.layers.get_output
it will instead compute one-hot of aт argmax.
Softmax normalizes over the LAST axis (works exactly as T.nnet.softmax for 2d).
Explaination and motivation: https://arxiv.org/abs/1611.01144
Code mostly follows http://blog.evjang.com/2016/11/tutorial-categorical-variational.html
Parameters
----------
incoming : a :class:`Layer` instance or a tuple
The layer feeding into this layer, or the expected input shape
t: temperature of sampling. Lower means more spike-like sampling. Can be symbolic (e.g. shared)
eps: a small number used for numerical stability
"""
def __init__(self, incoming, t=0.1, eps=1e-20, **kwargs):
super(GumbelSoftmaxLayer, self).__init__(incoming, **kwargs)
self.gumbel_softmax = GumbelSoftmax(t=t,eps=eps)
def get_output_for(self, input, hard_max=False, **kwargs):
if hard_max:
return onehot_argmax(input)
else:
return self.gumbel_softmax(input)
|
deepmind/lab | python/tests/utils/maze_game_controller_test.py | Python | gpl-2.0 | 2,542 | 0.006688 | """Tests maze_game_controller."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import absltest
import numpy as np
import six
import deepmind_lab
from python.tests.utils import maze_game_controller
from python.tests.utils import test_environment_decorator
class MazeGameControllerTest(absltest.TestCase):
def setUp(self):
self._env = test_environment_decorator.TestEnvironmentDecorator(
deepmind_lab.Lab('tests/maze_navigation_test',
maze_game_controller.REQUIRED_OBSERVATIONS))
self._controller = maze_game_controller.MazeGameController(self._env)
self._env.reset()
def testSpawnPosition(self):
pos = self._controller.maze_position()
self.assertTrue(np.array_equal(pos, [2, 3]))
def testMoveToReachablePos(self):
self.assertTrue(self._controller.move_to(2, 5))
pos = self._controller.maze_position()
self.assertTrue(np.array_equal(pos, [2, 5]))
def testMoveToUnreachablePos(self):
self.assertFalse(self._controller.move_to(20, 1))
| self.assertFalse(self._controller.move_to(3, 4))
def testFollowPat | hToReachablePos(self):
path = self._controller.find_path(2, 5)
self.assertTrue(self._controller.follow_path(path))
pos = self._controller.maze_position()
self.assertTrue(np.array_equal(pos, [2, 5]))
def testFollowSparsePathToReachablePos(self):
self.assertTrue(self._controller.follow_path([(2, 5)]))
pos = self._controller.maze_position()
self.assertTrue(np.array_equal(pos, [2, 5]))
def testFollowPathToUneachablePos(self):
self.assertFalse(self._controller.follow_path([(20, 1)]))
def testFailToGoTrhoughBlockedCorridor(self):
self.assertFalse(self._controller.move_to(2, 5, blocked=[(1, 4)]))
def testPickupLocations(self):
self.assertTrue(np.array_equal(self._controller.pickup_location(0), [5, 1]))
self.assertTrue(np.array_equal(self._controller.pickup_location(1), [6, 1]))
self.assertTrue(np.array_equal(self._controller.pickup_location(2), [4, 3]))
def testPathExists(self):
path = self._controller.find_path(2, 5)
six.assertCountEqual(self, path, [(1, 3), (1, 4), (1, 5), (2, 5)])
def testPathDoesNotExist(self):
self.assertIsNone(self._controller.find_path(3, 4))
if __name__ == '__main__':
if 'TEST_SRCDIR' in os.environ:
deepmind_lab.set_runfiles_path(
os.path.join(os.environ['TEST_SRCDIR'],
'org_deepmind_lab'))
absltest.main()
|
SandstoneHPC/OIDE | sandstone/lib/ui_methods.py | Python | mit | 647 | 0.01391 | from sandstone import settings
def get_app_descriptions(*args,**kwargs):
desc_list = []
for app_spec in settings.APP_SPECIFICATIONS: |
desc_list.append(app_spec['APP_DESCRIPTION'])
return desc_list
def get_ng_module_spec(*args,**kwargs):
mod_list = []
for app_spec in settings.APP_SPECIFICATIONS:
mod_list.append({
'module_name':app_spec['NG_MODULE_NAME'],
'stylesheets':app_spec['NG_MODULE_STYLESHEETS'],
'scripts':app_spec['NG_MODULE_SCRIPTS']
})
return mod_list
def get_url_prefix(*args,**kwargs):
url_prefix = settings.URL_PREFIX
return url_p | refix
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.