repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
josephsuh/extra-specs | nova/tests/api/openstack/compute/test_consoles.py | Python | apache-2.0 | 11,446 | 0.000786 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import webob
from nova.api.openstack.compute import consoles
from nova.compute import vm_states
from nova import console
from nova import db
from nova import exception
from nova import flags
from nova import test
from nova.tests.api.openstack import fakes
from nova import utils
FLAGS = flags.FLAGS
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
class FakeInstanceDB(object):
def __init__(self):
self.instances_by_id = {}
self.ids_by_uuid = {}
self.max_id = 0
def return_server_by_id(self, context, id):
if id not in self.instances_by_id:
self._add_server(id=id)
return dict(self.instances_by_id[id])
def return_server_by_uuid(self, context, uuid):
if uuid not in self.ids_by_uuid:
self._add_server(uuid=uuid)
return dict(self.instances_by_id[self.ids_by_uuid[uuid]])
def _add_server(self, id=None, uuid=None):
if id is None:
id = self.max_id + 1
if uuid is None:
uuid = str(utils.gen_uuid())
instance = stub_instance(id, uuid=uuid)
self.instances_by_id[id] = instance
self.ids_by_uuid[uuid] = id
if id > self.max_id:
self.max_id = id
def stub_instance(id, user_id='fake', project_id='fake', host=None,
vm_state=None, task_state=None,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
flavor_id="1", name=None, key_name='',
access_ipv4=None, access_ipv6=None, progress=0):
if host is not None:
host = str(host)
if key_name:
key_data = 'FAKE'
else:
key_data = ''
# ReservationID isn't sent back, hack it in there.
server_name = name or "server%s" % id
if reservation_id != "":
server_name = "reservation_%s" % (reservation_id, )
instance = {
"id": int(id),
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"admin_pass": "",
"user_id": user_id,
"project_id": project_id,
"image_ref": image_ref,
"kernel_id": "",
"ramdisk_id": "",
"launch_index": 0,
"key_name": key_name,
"key_data": key_data,
"vm_state": vm_state or vm_states.BUILDING,
"task_state": task_state,
"memory_mb": 0,
"vcpus": 0,
"root_gb": 0,
"hostname": "",
"host": host,
"instance_type": {},
"user_data": "",
"reservation_id": reservation_id,
"mac_address": "",
"scheduled_at": utils.utcnow(),
"launched_at": utils.utcnow(),
"terminated_at": utils.utcnow(),
"availability_zone": "",
"display_name": server_name,
"display_description": "",
"locked": False,
"metadata": [],
"access_ip_v4": access_ipv4,
"access_ip_v6": access_ipv6,
"uuid": uuid,
"progress": progress}
return instance
class ConsolesControllerTest(test.TestCase):
def setUp(self):
super(ConsolesControllerTest, self).setUp()
self.flags(verbose=True)
self.instance_db = FakeInstanceDB()
self.stubs.Set(db, 'instance_get',
self.instance_db.return_server_by_id)
self.stubs.Set(db, 'instance_get_by_uuid',
self.instance_db.return_server_by_uuid)
self.uuid = str(utils.gen_uuid())
self.url = '/v2/fake/servers/%s/consoles' % self.uuid
self.controller = consoles.Controller()
def test_create_console(self):
def fake_create_console(cons_self, context, instance_id):
self.assertEqual(instance_id, self.uuid)
return {}
self.stubs.Set(console.api.API, 'create_console', fake_create_console)
req = fakes.HTTPRequest.blank(self.url)
self.controller.create(req, self.uuid)
def test_show_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
self.assertEqual(instance_id, self.uuid)
self.assertEqual(console_id, 20)
pool = dict(console_type='fake_type',
public_hostname='fake_hostname')
return dict(id=console_id, password='fake_password',
port='fake_port', pool=pool, instance_name='inst-0001')
expected = {'console': {'id': 20,
'port': 'fake_port',
'host': 'fake_hostname',
'password': 'fake_password',
'instance_name': 'inst-0001',
'console_type': 'fake_type'}}
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
res_dict = self.controller.show(req, self.uuid, '20')
self.assertDictMatch(res_dict, expected)
def test_show_console_unknown_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
raise exception.ConsoleNotFound(console_id=console_id)
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, self.uuid, '20')
def test_show_console_unknown_instance(self):
def fake_get_console(cons_self, context, instance_id, console_id):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, self.uuid, '20')
def test_list_consoles(self):
def fake_get_consoles(cons_self, context, instance_id):
self.assertEqual(instance_id, self.uuid)
pool1 = dict(console_type='fake_type',
public_hostname='fake_hostname')
cons1 = dict(id=10, password='fake_password',
port='fake_port', pool=pool1)
pool2 = dict(console_type='fake_type2',
public_hostname='fake_hostname2')
cons2 = dict(id=11, password='fake_password2',
port='fake_port2', pool=pool2)
return [cons1, cons2]
expected = {'consoles':
[{'console': {'id': 10, 'console_type': 'fake_type'}},
{'console': {'id': 11, 'console_type': 'fake_type2'}}]}
self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.uuid)
self.assertDictMatch(res_dict, expected)
def test_delete_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
self.assertEqual(instance_id, self.uuid)
self.assertEqual(console_id, 20)
pool = dict(console_type='fake_type',
public_hostname='fake_hostname')
return dict(id=console_id, password='fake_password',
port='fake_port', pool=pool)
def fa | ke_delete_console(cons_self, cont | ext, instance_id, console_id):
self.ass |
cloudera/Impala | tests/benchmark/plugins/__init__.py | Python | apache-2.0 | 1,407 | 0.004264 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class Plugin(object):
'''Base plugin class.
Defines the interfaces that all plugins will use.
The interface consists of:
* The scope, which defines when the plugin will run.
The different scopes are:
* per query
* per workload
* per | test suite run
* A pre-hook method, which is run at the beginning of the 'scope'
* A post-hook method, which is run at the end of the scope.
'''
__name__ = "BasePlugin"
def __init__(self, scope=None):
self.scope = scope
def run_pre | _hook(self, context=None):
pass
def run_post_hook(self, context=None):
pass
|
mback2k/django-bawebauth | bawebauth/apps/bawebauth/migrations/0001_initial.py | Python | mit | 1,925 | 0.005714 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import bawebauth.apps.bawebauth.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateMode | l(
name='Device',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, verbose_name='name')),
('ident', models.CharField(max_length=40, verbose_name='ident')),
('crdate', models.DateTimeField(auto_now_add=True, verbose_name='date created')),
('tstamp', models.DateTime | Field(auto_now=True, verbose_name='date edited')),
('active', models.BooleanField(default=False, verbose_name='active')),
('enabled', models.BooleanField(default=False, verbose_name='enabled')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Usage',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('send', bawebauth.apps.bawebauth.fields.PositiveBigIntegerField(verbose_name='bytes send')),
('received', bawebauth.apps.bawebauth.fields.PositiveBigIntegerField(verbose_name='bytes received')),
('crdate', models.DateTimeField(auto_now_add=True, verbose_name='date created')),
('device', models.ForeignKey(to='bawebauth.Device')),
],
options={
},
bases=(models.Model,),
),
]
|
robotcator/gensim | gensim/test/test_text_analysis.py | Python | lgpl-2.1 | 6,853 | 0.001605 | import logging
import unittest
from gensim.corpora.dictionary import Dictionary
from gensim.topic_coherence.text_analysis import \
InvertedIndexAccumulator, WordOccurrenceAccumulator, ParallelWordOccurrenceAccumulator, \
CorpusAccumulator
class BaseTestCases(object):
class TextAnalyzerTestBase(unittest.TestCase):
texts = [
['this', 'is', 'a'],
['test', 'document'],
['this', 'test', 'document'],
['test', 'test', 'this']
]
token2id = {
'this': 10,
'is': 15,
'a': 20,
'test': 21,
'document': 17
}
dictionary = Dictionary(texts)
dictionary.token2id = token2id
dictionary.id2token = {v: k for k, v in token2id.items()}
top_ids = set(token2id.values())
texts2 = [
['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey'],
['user', 'user']
]
dictionary2 = Dictionary(texts2)
dictionary2.id2token = {v: k for k, v in dictionary2.token2id.items()}
top_ids2 = set(dictionary2.token2id.values())
accumulator_cls = None
def init_accumulator(self):
return self.accumulator_cls(self.top_ids, self.dictionary)
def init_accumulator2(self):
return self.accumulator_cls(self.top_ids2, self.dictionary2)
def test_occurrence_counting(self):
accumulator = self.init_accumulator().accumulate(self.texts, 3)
self.assertEqual(3, accumulator.get_occurrences("this"))
self.assertEqual(1, accumulator.get_occurrences("is"))
self.assertEqual(1, accumulator.get_occurrences("a"))
self.assertEqual(2, accumulator.get_co_occurrences("test", "document"))
self.assertEqual(2, accumulator.get_co_occurrences("test", "this"))
self.assertEqual(1, accumulator.get_co_occurrences("is", "a"))
def test_occurrence_counting2(self):
accumulator = self.init_accumulator2().accumulate(self.texts2, 110)
self.assertEqual(2, accumulator.get_occurrences("human"))
self.assertEqual(4, accumulator.get_occurrences("user"))
self.assertEqual(3, accumulator.get_occurrences("graph"))
self.assertEqual(3, accumulator.get_occurrences("trees"))
cases = [
(1, ("human", "interface")),
(2, ("system", "user")),
(2, ("graph", "minors")),
(2, ("graph", "trees")),
(4, ("user", "user")),
(3, ("graph", "graph")),
(0, ("time", "eps"))
]
for expected_count, (word1, word2) in cases:
# Verify co-occurrence counts are correct, regardless of word order.
self.assertEqual(expected_count, accumulator.get_co_occurrences(word1, word2))
self.assertEqual(expected_count, accumulator.get_co_occurrences(word2, word1))
# Also verify that using token ids instead of tokens works the same.
word_id1 = self.dictionary2.token2id[word1]
word_id2 = self.dictionary2.token2id[word2]
self.assertEqual(expected_count, accumulator.get_co_occurrences(word_id1, word_id2))
self.assertEqual(expected_count, accumulator.get_co_occurrences(word_id2, word_id1))
def test_occurences_for_irrelevant_words(self):
accumulator = self.init_accumulator().accumulate(self.texts, 2)
with self.assertRaise | s(KeyError):
accumulator.get_occurrences("irrelevant")
with self.assertRaises(KeyError):
accumulator.get_co_occurrences("test", "irrelevant")
class TestInvertedIndexAccumulator(BaseTestCases.TextAnalyzerTestBase):
| accumulator_cls = InvertedIndexAccumulator
def test_accumulate1(self):
accumulator = InvertedIndexAccumulator(self.top_ids, self.dictionary)\
.accumulate(self.texts, 2)
# [['this', 'is'], ['is', 'a'], ['test', 'document'], ['this', 'test'],
# ['test', 'document'], ['test', 'test'], ['test', 'this']]
inverted_index = accumulator.index_to_dict()
expected = {
10: {0, 3, 6},
15: {0, 1},
20: {1},
21: {2, 3, 4, 5, 6},
17: {2, 4}
}
self.assertDictEqual(expected, inverted_index)
def test_accumulate2(self):
accumulator = InvertedIndexAccumulator(self.top_ids, self.dictionary)\
.accumulate(self.texts, 3)
# [['this', 'is', 'a'], ['test', 'document'], ['this', 'test', 'document'],
# ['test', 'test', 'this']
inverted_index = accumulator.index_to_dict()
expected = {
10: {0, 2, 3},
15: {0},
20: {0},
21: {1, 2, 3},
17: {1, 2}
}
self.assertDictEqual(expected, inverted_index)
class TestWordOccurrenceAccumulator(BaseTestCases.TextAnalyzerTestBase):
accumulator_cls = WordOccurrenceAccumulator
class TestParallelWordOccurrenceAccumulator(BaseTestCases.TextAnalyzerTestBase):
accumulator_cls = ParallelWordOccurrenceAccumulator
def init_accumulator(self):
return self.accumulator_cls(2, self.top_ids, self.dictionary)
def init_accumulator2(self):
return self.accumulator_cls(2, self.top_ids2, self.dictionary2)
class TestCorpusAnalyzer(unittest.TestCase):
def setUp(self):
self.dictionary = BaseTestCases.TextAnalyzerTestBase.dictionary
self.top_ids = BaseTestCases.TextAnalyzerTestBase.top_ids
self.corpus = \
[self.dictionary.doc2bow(doc) for doc in BaseTestCases.TextAnalyzerTestBase.texts]
def test_index_accumulation(self):
accumulator = CorpusAccumulator(self.top_ids).accumulate(self.corpus)
inverted_index = accumulator.index_to_dict()
expected = {
10: {0, 2, 3},
15: {0},
20: {0},
21: {1, 2, 3},
17: {1, 2}
}
self.assertDictEqual(expected, inverted_index)
self.assertEqual(3, accumulator.get_occurrences(10))
self.assertEqual(2, accumulator.get_occurrences(17))
self.assertEqual(2, accumulator.get_co_occurrences(10, 21))
self.assertEqual(1, accumulator.get_co_occurrences(10, 17))
if __name__ == '__main__':
logging.root.setLevel(logging.WARNING)
unittest.main()
|
tierney/mustached-bear | tools/quote_test.py | Python | bsd-3-clause | 5,459 | 0.006228 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import quote
import sys
import unittest
verbose = False
# Wrapped versions of the functions that we're testing, so that during
# debugging we can more easily see what their inputs were.
def VerboseQuote(in_string, specials, *args, **kwargs):
if verbose:
sys.stdout.write('Invoking quote(%s, %s, %s)\n' %
(repr(in_string), repr(specials),
', '.join([repr(a) for a in args] +
[repr(k) + ':' + repr(v)
for k, v in kwargs])))
return quote.quote(in_string, specials, *args, **kwargs)
def VerboseUnquote(in_string, specials, *args, **kwargs):
if verbose:
sys.stdout.write('Invoking unquote(%s, %s, %s)\n' %
(repr(in_string), repr(specials),
', '.join([repr(a) for a in args] +
[repr(k) + ':' + repr(v)
for k, v in kwargs])))
return quote.unquote(in_string, specials, *args, **kwargs)
class TestQuote(unittest.TestCase):
# test utilities
def generic_test(se | lf, fn, in_args, expected_out_obj):
actual = apply(fn, in_args)
self.assertEqual(actual, expected_out_obj)
def check_invertible(self, in_string, specials, escape='\\'):
q | = VerboseQuote(in_string, specials, escape)
qq = VerboseUnquote(q, specials, escape)
self.assertEqual(''.join(qq), in_string)
def run_test_tuples(self, test_tuples):
for func, in_args, expected in test_tuples:
self.generic_test(func, in_args, expected)
def testQuote(self):
test_tuples = [[VerboseQuote,
['foo, bar, baz, and quux too!', 'abc'],
'foo, \\b\\ar, \\b\\az, \\and quux too!'],
[VerboseQuote,
['when \\ appears in the input', 'a'],
'when \\\\ \\appe\\ars in the input']]
self.run_test_tuples(test_tuples)
def testUnquote(self):
test_tuples = [[VerboseUnquote,
['key\\:still_key:value\\:more_value', ':'],
['key:still_key', ':', 'value:more_value']],
[VerboseUnquote,
['about that sep\\ar\\ator in the beginning', 'ab'],
['', 'ab', 'out th', 'a', 't separator in the ',
'b', 'eginning']],
[VerboseUnquote,
['the rain in spain fall\\s ma\\i\\nly on the plains',
'ins'],
['the ra', 'in', ' ', 'in', ' ', 's', 'pa', 'in',
' falls mainly o', 'n', ' the pla', 'ins']],
]
self.run_test_tuples(test_tuples)
def testInvertible(self):
self.check_invertible('abcdefg', 'bc')
self.check_invertible('a\\bcdefg', 'bc')
self.check_invertible('ab\\cdefg', 'bc')
self.check_invertible('\\ab\\cdefg', 'abc')
self.check_invertible('abcde\\fg', 'efg')
self.check_invertible('a\\b', '')
# Invoke this file directly for simple manual testing. For running
# the unittests, use the -t flag. Any flags to be passed to the
# unittest module should be passed as after the optparse processing,
# e.g., "quote_test.py -t -- -v" to pass the -v flag to the unittest
# module.
def main(argv):
global verbose
parser = optparse.OptionParser(
usage='Usage: %prog [options] word...')
parser.add_option('-s', '--special-chars', dest='special_chars', default=':',
help='Special characters to quote (default is ":")')
parser.add_option('-q', '--quote', dest='quote', default='\\',
help='Quote or escape character (default is "\")')
parser.add_option('-t', '--run-tests', dest='tests', action='store_true',
help='Run built-in tests\n')
parser.add_option('-u', '--unquote-input', dest='unquote_input',
action='store_true', help='Unquote command line argument')
parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
help='Verbose test output')
options, args = parser.parse_args(argv)
if options.verbose:
verbose = True
num_errors = 0
if options.tests:
sys.argv = [sys.argv[0]] + args
unittest.main()
else:
for word in args:
# NB: there are inputs x for which quote(unquote(x) != x, but
# there should be no input x for which unquote(quote(x)) != x.
if options.unquote_input:
qq = quote.unquote(word, options.special_chars, options.quote)
sys.stdout.write('unquote(%s) = %s\n'
% (word, ''.join(qq)))
# There is no expected output for unquote -- this is just for
# manual testing, so it is okay that we do not (and cannot)
# update num_errors here.
else:
q = quote.quote(word, options.special_chars, options.quote)
qq = quote.unquote(q, options.special_chars, options.quote)
sys.stdout.write('quote(%s) = %s, unquote(%s) = %s\n'
% (word, q, q, ''.join(qq)))
if word != ''.join(qq):
num_errors += 1
if num_errors > 0:
sys.stderr.write('[ FAILED ] %d test failures\n' % num_errors)
return num_errors
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
battlemidget/cage-cmf | app/system/interface.py | Python | mit | 2,396 | 0.002087 | import os
import re
import sys
import logging
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.autoreload
from tornado.process import fork_processes, task_id
from tornado.log import app_log
from tornado.options import define, options, parse_command_line
from app.conf import Config
cfg = Config()
define("port", default=9000, help="port", type=int)
define("debug", default=False, help="run in debug mode", type=bool)
class RunApp(object):
def __init__(self, commons):
self.commons = commons
self.commons['safe_mode'] = False
parse_command_line()
def run(self):
if options.debug:
app_log.setLevel(logging.DEBUG)
if not options.debug:
fork_processes(None)
options.port += task_id() or 0
app_log.debug("Starting %s on port %s" % (cfg.platform_name, options.port))
# initialize the application
tornado.httpserver.HTTPServer(Application(self.commons)).listen(options.port, ' | 0.0.0.0')
ioloop = tornado.ioloop.IOLoop.instance()
if options.debug:
tornado.autoreload.start(ioloop)
# enter the Tornado IO loop
ioloop.start()
class Application(tornado.web.Application):
def __init__(self, commons):
| self.commons = commons
self.cfg = cfg
self.cage_path = self.commons['script_location']
app_log.debug("Application path (%s)" % (self.cage_path,))
# Set application path in config
self.cfg.app_path = self.cage_path
urls = [
(r"/", "app.controllers.views.IndexHandler"),
(r"/entity/([A-Za-z0-9-]+$)",
"app.controllers.views.EntityHandler"),
(r"/entity/search/(.*)/?",
"app.controllers.views.EntitySearchHandler"),
(r"/entity/new/(.*)/?",
"app.controllers.views.EntityNewHandler"),
(r"/entity/modify/(.*)/(.*)/?",
"app.controllers.views.EntityModifyHandler"),
]
ui_modules_map = {}
settings = dict(
template_path=None,
static_path=None,
xsrf_cookies=False if options.debug else True,
cookie_secret=self.cfg.cookie_secret,
debug=options.debug,
ui_modules=ui_modules_map,
)
tornado.web.Application.__init__(self, urls, **settings)
|
reviewboard/reviewboard | reviewboard/scmtools/tests/test_svn.py | Python | mit | 39,691 | 0 | # coding=utf-8
import os
import unittest
from hashlib import md5
from django.conf import settings
from djblets.testing.decorators import add_fixtures
from kgb import SpyAgency
from reviewboard.diffviewer.diffutils import patch
from reviewboard.diffviewer.testing.mixins import DiffParserTestingMixin
from reviewboard.scmtools.core import (Branch, Commit, Revision, HEAD,
PRE_CREATION)
from reviewboard.scmtools.errors import SCMError, FileNotFoundError
from reviewboard.scmtools.models import Repository, Tool
from reviewboard.scmtools.svn import SVNTool, recompute_svn_backend
from reviewboard.scmtools.svn.utils import (collapse_svn_keywords,
has_expanded_svn_keywords)
from reviewboard.scmtools.tests.testcases import SCMTestCase
from reviewboard.testing.testcase import TestCase
class _CommonSVNTestCase(DiffParserTestingMixin, SpyAgency, SCMTestCase):
"""Common unit tests for Subversion.
This is meant to be subclassed for each backend that wants to run
the common set of tests.
"""
backend = None
backend_name = None
fixtures = ['test_scmtools']
__test__ = False
def setUp(self):
super(_CommonSVNTestCase, self).setUp()
self._old_backend_setting = settings.SVNTOOL_BACKENDS
settings.SVNTOOL_BACKENDS = [self.backend]
recompute_svn_backend()
self.svn_repo_path = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'..', 'testdata', 'svn_repo'))
self.svn_ssh_path = ('svn+ssh://localhost%s'
% self.svn_repo_path.replace('\\', '/'))
self.repository = Repository.objects.create(
name='Subversion SVN',
path='file://%s' % self.svn_repo_path,
tool=Tool.objects.get(name='Subversion'))
try:
self.tool = self.repository.get_scmtool()
except ImportError:
raise unittest.SkipTest('The %s backend could not be used. A '
'dependency may be missing.'
% self.backend)
assert self.tool.client.__class__.__module__ == self.backend
def tearDown(self):
super(_CommonSVNTestCase, self).tearDown()
settings.SVNTOOL_BACKENDS = self._old_backend_setting
recompute_svn_backend()
def shortDescription(self):
desc = super(_CommonSVNTestCase, self).shortDescription()
desc = desc.replace('<backend>', self.backend_ | name)
return desc
def test_get_repository_info(self):
"""Testing SVN (<backend>) get_repository_info"""
info = self.tool.get_repository_info()
self.assertIn('uuid', info)
self.assertIsInstance(info['uuid'], str)
| self.assertEqual(info['uuid'], '41215d38-f5a5-421f-ba17-e0be11e6c705')
self.assertIn('root_url', info)
self.assertIsInstance(info['root_url'], str)
self.assertEqual(info['root_url'], self.repository.path)
self.assertIn('url', info)
self.assertIsInstance(info['url'], str)
self.assertEqual(info['url'], self.repository.path)
def test_ssh(self):
"""Testing SVN (<backend>) with a SSH-backed Subversion repository"""
self._test_ssh(self.svn_ssh_path, 'trunk/doc/misc-docs/Makefile')
def test_ssh_with_site(self):
"""Testing SVN (<backend>) with a SSH-backed Subversion repository
with a LocalSite
"""
self._test_ssh_with_site(self.svn_ssh_path,
'trunk/doc/misc-docs/Makefile')
def test_get_file(self):
"""Testing SVN (<backend>) get_file"""
tool = self.tool
expected = (b'include ../tools/Makefile.base-vars\n'
b'NAME = misc-docs\n'
b'OUTNAME = svn-misc-docs\n'
b'INSTALL_DIR = $(DESTDIR)/usr/share/doc/subversion\n'
b'include ../tools/Makefile.base-rules\n')
# There are 3 versions of this test in order to get 100% coverage of
# the svn module.
rev = Revision('2')
filename = 'trunk/doc/misc-docs/Makefile'
value = tool.get_file(filename, rev)
self.assertIsInstance(value, bytes)
self.assertEqual(value, expected)
value = tool.get_file('/%s' % filename, rev)
self.assertIsInstance(value, bytes)
self.assertEqual(value, expected)
value = tool.get_file('%s/%s' % (self.repository.path, filename), rev)
self.assertIsInstance(value, bytes)
self.assertEqual(value, expected)
with self.assertRaises(FileNotFoundError):
tool.get_file('')
def test_file_exists(self):
"""Testing SVN (<backend>) file_exists"""
tool = self.tool
self.assertTrue(tool.file_exists('trunk/doc/misc-docs/Makefile'))
self.assertFalse(tool.file_exists('trunk/doc/misc-docs/Makefile2'))
with self.assertRaises(FileNotFoundError):
tool.get_file('hello', PRE_CREATION)
def test_get_file_with_special_url_chars(self):
"""Testing SVN (<backend>) get_file with filename containing
characters that are special in URLs and repository path as a URI
"""
value = self.tool.get_file('trunk/crazy& ?#.txt', Revision('12'))
self.assertTrue(isinstance(value, bytes))
self.assertEqual(value, b'Lots of characters in this one.\n')
def test_file_exists_with_special_url_chars(self):
"""Testing SVN (<backend>) file_exists with filename containing
characters that are special in URLs
"""
self.assertTrue(self.tool.file_exists('trunk/crazy& ?#.txt',
Revision('12')))
# These should not crash. We'll be testing both file:// URLs
# (which fail for anything lower than ASCII code 32) and for actual
# URLs (which support all characters).
self.assertFalse(self.tool.file_exists('trunk/%s.txt' % ''.join(
chr(c)
for c in range(32, 128)
)))
self.tool.client.repopath = 'svn+ssh://localhost:0/svn'
try:
self.assertFalse(self.tool.file_exists('trunk/%s.txt' % ''.join(
chr(c)
for c in range(128)
)))
except SCMError:
# Couldn't connect. Valid result.
pass
def test_normalize_path_with_special_chars_and_remote_url(self):
"""Testing SVN (<backend>) normalize_path with special characters
and remote URL
"""
client = self.tool.client
client.repopath = 'svn+ssh://example.com/svn'
path = client.normalize_path(''.join(
chr(c)
for c in range(128)
))
# This URL was generated based on modified code that directly used
# Subversion's lookup take explicitly, ensuring we're getting the
# results we want from urllib.quote() and our list of safe characters.
self.assertEqual(
path,
"svn+ssh://example.com/svn/%00%01%02%03%04%05%06%07%08%09%0A"
"%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E"
"%1F%20!%22%23$%25&'()*+,-./0123456789:%3B%3C=%3E%3F@ABCDEFGH"
"IJKLMNOPQRSTUVWXYZ%5B%5C%5D%5E_%60abcdefghijklmnopqrstuvwxyz"
"%7B%7C%7D~%7F")
def test_normalize_path_with_special_chars_and_file_url(self):
"""Testing SVN (<backend>) normalize_path with special characters
and local file:// URL
"""
client = self.tool.client
client.repopath = 'file:///tmp/svn'
path = client.normalize_path(''.join(
chr(c)
for c in range(32, 128)
))
# This URL was generated based on modified code that directly used
# Subversion's lookup take explicitly, ensuring we're getting the
# results we want from urllib.quote() and our list of safe characters.
self.assertEqual(
path,
"file:///tmp/svn/%20!%22%23$%25&'()*+,-./0123456789:%3B%3C=%3E"
|
mahak/spark | python/pyspark/ml/recommendation.py | Python | apache-2.0 | 24,774 | 0.001655 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from typing import Any, Dict, Optional, TYPE_CHECKING
from pyspark import since, keyword_only
from pyspark.ml.param.shared import (
HasPredictionCol,
HasBlockSize,
HasMaxIter,
HasRegParam,
HasCheckpointInterval,
HasSeed,
)
from pyspark.ml.wrapper import JavaEstimator, JavaModel
from pyspark.ml.common import inherit_doc
from pyspark.ml.param import Params, TypeConverters, Param
from pyspark.ml.util import JavaMLWritable, JavaMLReadable
from pyspark.sql import DataFrame
if TYPE_CHECKING:
from py4j.java_gateway import JavaObject
__all__ = ["ALS", "ALSModel"]
@inherit_doc
class _ALSModelParams(HasPredictionCol, HasBlockSize):
"""
Params for :py:class:`ALS` and :py:class:`ALSModel`.
.. versionadded:: 3.0.0
"""
userCol: Param[str] = Param(
Params._dummy(),
"userCol",
"column name for user ids. Ids must be within " + "the integer value range.",
typeConverter=TypeConverters.toString,
)
itemCol: Param[str] = Param(
Params._dummy(),
"itemCol",
"column name for item ids. Ids must be within " + "the integer value range.",
typeConverter=TypeConverters.toString,
)
coldStartStrategy: Param[str] = Param(
Params._dummy(),
"coldStartStrategy",
"strategy for dealing with "
+ "unknown or new users/items at prediction time. This may be useful "
+ "in cross-validation or production scenarios, for handling "
+ "user/item ids the model has not seen in the training data. "
+ "Supported values: 'nan', 'drop'.",
typeConverter=TypeConverters.toString,
)
def __init__(self, *args: Any):
super(_ALSModelParams, self).__init__(*args)
self._setDefault(blockSize=4096)
@since("1.4.0")
def getUserCol(self) -> str:
"""
Gets the value of userCol or its default value.
"""
return self.getOrDefault(self.userCol)
@since("1.4.0")
def getItemCol(self) -> str:
"""
Gets the value of itemCol or its default value.
"""
return self.getOrDefault(self.itemCol)
@since("2.2.0")
def getColdStartStrategy(self) -> str:
"""
Gets the value of coldStartStrategy or its default value.
"""
return self.getOrDefault(self.coldStartStrategy)
@inherit_doc
class _ALSParams(_ALSModelParams, HasMaxIter, HasRegParam, HasCheckpointInterval, HasSeed):
"""
Params for :py:class:`ALS`.
.. versionadded:: 3.0.0
"""
rank: Param[ | int] = Param(
Params._dummy(), "rank", "rank of the factorization", typeConverter=TypeConverters.toInt
)
numUserBlocks: Param[int] = Param(
Params._dummy(),
"numUserBlocks",
"number of user blocks",
typeConverter=TypeConverters.toInt,
)
numItemBlocks: Param[int] = Param(
Params._dummy(),
"numItemBlocks",
"number of item blocks",
typeConverter=TypeConverters.toInt,
| )
implicitPrefs: Param[bool] = Param(
Params._dummy(),
"implicitPrefs",
"whether to use implicit preference",
typeConverter=TypeConverters.toBoolean,
)
alpha: Param[float] = Param(
Params._dummy(),
"alpha",
"alpha for implicit preference",
typeConverter=TypeConverters.toFloat,
)
ratingCol: Param[str] = Param(
Params._dummy(),
"ratingCol",
"column name for ratings",
typeConverter=TypeConverters.toString,
)
nonnegative: Param[bool] = Param(
Params._dummy(),
"nonnegative",
"whether to use nonnegative constraint for least squares",
typeConverter=TypeConverters.toBoolean,
)
intermediateStorageLevel: Param[str] = Param(
Params._dummy(),
"intermediateStorageLevel",
"StorageLevel for intermediate datasets. Cannot be 'NONE'.",
typeConverter=TypeConverters.toString,
)
finalStorageLevel: Param[str] = Param(
Params._dummy(),
"finalStorageLevel",
"StorageLevel for ALS model factors.",
typeConverter=TypeConverters.toString,
)
def __init__(self, *args: Any):
super(_ALSParams, self).__init__(*args)
self._setDefault(
rank=10,
maxIter=10,
regParam=0.1,
numUserBlocks=10,
numItemBlocks=10,
implicitPrefs=False,
alpha=1.0,
userCol="user",
itemCol="item",
ratingCol="rating",
nonnegative=False,
checkpointInterval=10,
intermediateStorageLevel="MEMORY_AND_DISK",
finalStorageLevel="MEMORY_AND_DISK",
coldStartStrategy="nan",
)
@since("1.4.0")
def getRank(self) -> int:
"""
Gets the value of rank or its default value.
"""
return self.getOrDefault(self.rank)
@since("1.4.0")
def getNumUserBlocks(self) -> int:
"""
Gets the value of numUserBlocks or its default value.
"""
return self.getOrDefault(self.numUserBlocks)
@since("1.4.0")
def getNumItemBlocks(self) -> int:
"""
Gets the value of numItemBlocks or its default value.
"""
return self.getOrDefault(self.numItemBlocks)
@since("1.4.0")
def getImplicitPrefs(self) -> bool:
"""
Gets the value of implicitPrefs or its default value.
"""
return self.getOrDefault(self.implicitPrefs)
@since("1.4.0")
def getAlpha(self) -> float:
"""
Gets the value of alpha or its default value.
"""
return self.getOrDefault(self.alpha)
@since("1.4.0")
def getRatingCol(self) -> str:
"""
Gets the value of ratingCol or its default value.
"""
return self.getOrDefault(self.ratingCol)
@since("1.4.0")
def getNonnegative(self) -> bool:
"""
Gets the value of nonnegative or its default value.
"""
return self.getOrDefault(self.nonnegative)
@since("2.0.0")
def getIntermediateStorageLevel(self) -> str:
"""
Gets the value of intermediateStorageLevel or its default value.
"""
return self.getOrDefault(self.intermediateStorageLevel)
@since("2.0.0")
def getFinalStorageLevel(self) -> str:
"""
Gets the value of finalStorageLevel or its default value.
"""
return self.getOrDefault(self.finalStorageLevel)
@inherit_doc
class ALS(JavaEstimator["ALSModel"], _ALSParams, JavaMLWritable, JavaMLReadable["ALS"]):
"""
Alternating Least Squares (ALS) matrix factorization.
ALS attempts to estimate the ratings matrix `R` as the product of
two lower-rank matrices, `X` and `Y`, i.e. `X * Yt = R`. Typically
these approximations are called 'factor' matrices. The general
approach is iterative. During each iteration, one of the factor
matrices is held constant, while the other is solved for using least
squares. The newly-solved factor matrix is then held constant while
solving for the other factor matrix.
This is a blocked implementation of the ALS factorization algorithm
that groups the two sets of factors (referred to as "users" and
"products") into blocks and reduces communicat |
xdega/.dotfiles | .vim/.vim/R/synctex_evince_forward.py | Python | mit | 5,473 | 0.007491 |
# The code in this files is borrowed from Gedit Synctex plugin.
#
# Copyright (C) 2010 Jose Aliste
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public Licence as published by the Free Software
# Foundation; either version 2 of the Licence, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public Licence for more
# details.
#
# You should have received a copy of the GNU General Public Licence along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301, USA
# Modified to Nvim-R by Jakson Aquino
import dbus
import dbus.mainloop.glib, sys, os
from gi.repository import GObject
RUNNING, CLOSED = range(2)
EV_DAEMON_PATH = "/org/gnome/evince/Daemon"
EV_DAEMON_NAME = "org.gnome.evince.Daemon"
EV_DAEMON_IFACE = "org.gnome.evince.Daemon"
EVINCE_PATH = "/org/gnome/evince/Evince"
EVINCE_IFACE = "org.gnome.evince.Application"
EV_WINDOW_IFACE = "org.gnome.evince.Window"
class EvinceWindowProxy:
"""A DBUS proxy for an Evince Window."""
daemon = None
bus = None
def __init__(self, uri, spawn = False):
self.uri = uri
self.spawn = spawn
self.status = CLOSED
self.source_handler = None
self.dbus_name = ''
self._handler = None
try:
if EvinceWindowProxy.bus is None:
EvinceWindowProxy.bus = dbus.SessionBus()
if EvinceWindowProxy.daemon is None:
EvinceWindowProxy.daemon = EvinceWindowProxy.bus.get_object(EV_DAEMON_NAME,
EV_DAEMON_PATH,
follow_name_owner_changes=True)
EvinceWindowProxy.bus.add_signal_receiver(self._on_doc_loaded, signal_name="DocumentLoaded",
dbus_interface = EV_WINDOW_IFACE,
sender_keyword='sender')
self._get_dbus_name(False)
except dbus.DBusException:
sys.stderr.write("Could not connect to the Evince Daemon")
sys.stderr.flush()
def _on_doc_loaded(self, uri, **keyargs):
if uri == self.uri and self._handler is None:
self.handle_find_document_reply(keyargs['sender'])
def _get_dbus_name(self, spawn):
EvinceWindowProxy.daemon.FindDocument(self.uri,spawn,
reply_handler=self.handle_find_document_reply,
error_handler=self.handle_find_document_error,
dbus_interface = EV_DAEMON_IFACE)
def handle_find_document_error(self, error):
sys.stderr.write("FindDocument DBus call has failed")
sys.stderr.flush()
def handle_find_document_reply(self, evince_name):
if self._handler is not None:
handler = self._handler
else:
handler = self.handle_get_window_list_reply
if evince_name != '':
self.dbus_name = evince_name
self.status = RUNNING
self.evince = EvinceWindowProxy.bus.get_object(self.dbus_name, EVINCE_PATH)
self.evince.GetWindowList(dbus_interface = EVINCE_IFACE,
reply_handler = handler,
error_handler = self.handle_get_window_list_error)
def handle_get_window_list_error (self, e):
sys.stderr.write("GetWindowList DBus call has failed")
sys.stderr.flush()
def handle_get_window_list_reply (self, window_list):
if len(window_list) > 0:
window_obj = EvinceWindowProxy.bus.get_object(self.dbus_name, window_list[0])
self.window = dbus.Interface(window_obj,EV_WINDOW_IFACE)
else:
#That should never happen.
sys.stderr.write("GetWindowList returned empty list")
sys.stderr.flush()
def SyncView(self, input_file, data):
if self.status == CLOSED:
if self.spawn:
self._tmp_syncview = [input_file, data, 0];
self._handler = self._syncview_handler
self._get_dbus_name(True)
sys.stdout.write("call Evince_Again()")
| sys.stdout.flush()
else:
self.window.SyncView(input_file, data, 0, dbus_interface = "org.gnome.evince.Window")
sys.stdout.write("let g:rplugin_evince_loop = 0")
sys.stdout.flush()
def _syncview_handler(self, window_list):
self.handle_get_window_list_reply(window_list)
if self.status == CLOSED:
return False
self.window.SyncView(self._tmp_syncview[0],self._tmp_syncview[1], self._tmp_syncview[ | 2], dbus_interface="org.gnome.evince.Window")
del self._tmp_syncview
self._handler = None
return True
path_input = sys.argv[1]
path_output = sys.argv[2]
line_number = int(sys.argv[3])
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
a = EvinceWindowProxy('file://' + path_output, True)
def sync_view(ev_window, path_input, line_number):
ev_window.SyncView(path_input, (line_number, 1))
loop.quit()
GObject.timeout_add(400, sync_view, a, path_input, line_number)
loop = GObject.MainLoop()
loop.run()
|
Metonimie/Beaglebone | alexa-amazon/basicServer/gpio_handlers.py | Python | gpl-3.0 | 525 | 0.00381 | """
/*
* Custom handlers for the BBB
*
*/
"""
import Adafruit_BBIO.GPIO as GPIO
GPIO.setup("P9_12", GPIO.OUT)
def alexaHandler(client, userdata, message):
print "Received payload: " + str(message.payload.decode())
# Assume only 1 and 0 are send here.
if message.payload == "1":
GPIO.output("P9_12", GPIO.HIGH)
| print "Turned christmas tree On"
elif message.payload == "0":
GPIO.output("P9_12 | ", GPIO.LOW)
print "Turned christmas tree Off"
def cleanUp():
GPIO.cleanup()
|
leegao/readme2tex | readme2tex/__init__.py | Python | mit | 26 | 0.038462 | from . | render import | render |
ros/catkin | python/catkin/workspace_vcs.py | Python | bsd-3-clause | 76 | 0 | # f | or backward compatibility
from catkin | _pkg.workspace_vcs import * # noqa
|
pelson/conda-build-all | setup.py | Python | bsd-3-clause | 752 | 0.00266 | from setuptools import setup
import versioneer
setup(
name='conda-build-all',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Build all c | onda recipes within a directory/repository.',
author='Phil Elson',
author_email='pelson.pub@gmail.com',
url='https://github.com/scitools/conda-buildall',
packages=['conda_build_all'],
entry_points={
'console_scripts': [
'conda-build-all = conda_build_all.cli:main',
# This is needed as conda can't deal with dashes in subcommands yet
# (see https://github.com/conda/conda/p | ull/1840).
'conda-buildall = conda_build_all.cli:main',
]
},
)
|
DinoV/PTVS | Python/Tests/TestData/Grammar/Ellipsis.py | Python | apache-2.0 | 17 | 0 | fob(...)
1 + . | ..
| |
Canpio/Paddle | python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py | Python | apache-2.0 | 7,871 | 0.000508 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.dataset.conll05 as conll05
import paddle.fluid as fluid
import unittest
import paddle
import numpy as np
import os
word_dict, verb_dict, label_dict = conll05.get_dict()
word_dict_len = len(word_dict)
label_dict_len = len(label_dict)
pred_dict_len = len(verb_dict)
mark_dict_len = 2
word_dim = 32
mark_dim = 5
hidden_dim = 512
depth = 8
mix_hidden_lr = 1e-3
embedding_name = 'emb'
def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark,
is_sparse, **ignored):
# 8 features
predicate_embedding = fluid.layers.embedding(
input=predicate,
is_sparse=is_sparse,
size=[pred_dict_len, word_dim],
dtype='float32',
param_attr='vemb')
mark_embedding = fluid.layers.embedding(
input=mark,
is_sparse=is_sparse,
size=[mark_dict_len, mark_dim],
dtype='float32')
word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2]
emb_layers = [
fluid.layers.embedding(
size=[word_dict_len, word_dim],
is_sparse=is_sparse,
input=x,
param_attr=fluid.ParamAttr(
name=embedding | _name, trainable=False)) for x in word_input
]
emb_layers.append(predicate_embedding)
emb_layers.append(mark_embedding)
hidden_0_layers = [
fluid.layers.fc(input=emb, size=hidden_dim, act='tanh')
for emb in emb_layers
]
hidden_0 = fluid.layers.sums(input=hidden_0_layers) |
lstm_0 = fluid.layers.dynamic_lstm(
input=hidden_0,
size=hidden_dim,
candidate_activation='relu',
gate_activation='sigmoid',
cell_activation='sigmoid')
# stack L-LSTM and R-LSTM with direct edges
input_tmp = [hidden_0, lstm_0]
for i in range(1, depth):
mix_hidden = fluid.layers.sums(input=[
fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'),
fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh')
])
lstm = fluid.layers.dynamic_lstm(
input=mix_hidden,
size=hidden_dim,
candidate_activation='relu',
gate_activation='sigmoid',
cell_activation='sigmoid',
is_reverse=((i % 2) == 1))
input_tmp = [mix_hidden, lstm]
feature_out = fluid.layers.sums(input=[
fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'),
fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh')
])
return feature_out
class TestCRFModel(unittest.TestCase):
def check_network_convergence(self,
is_sparse,
build_strategy=None,
use_cuda=True):
os.environ['CPU_NUM'] = str(4)
main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
word = fluid.layers.data(
name='word_data', shape=[1], dtype='int64', lod_level=1)
predicate = fluid.layers.data(
name='verb_data', shape=[1], dtype='int64', lod_level=1)
ctx_n2 = fluid.layers.data(
name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1)
ctx_n1 = fluid.layers.data(
name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1)
ctx_0 = fluid.layers.data(
name='ctx_0_data', shape=[1], dtype='int64', lod_level=1)
ctx_p1 = fluid.layers.data(
name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1)
ctx_p2 = fluid.layers.data(
name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1)
mark = fluid.layers.data(
name='mark_data', shape=[1], dtype='int64', lod_level=1)
feature_out = db_lstm(**locals())
target = fluid.layers.data(
name='target', shape=[1], dtype='int64', lod_level=1)
crf_cost = fluid.layers.linear_chain_crf(
input=feature_out,
label=target,
param_attr=fluid.ParamAttr(
name='crfw', learning_rate=1e-1))
avg_cost = fluid.layers.mean(crf_cost)
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.layers.exponential_decay(
learning_rate=0.01,
decay_steps=100000,
decay_rate=0.5,
staircase=True))
sgd_optimizer.minimize(avg_cost)
train_data = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.conll05.test(), buf_size=8192),
batch_size=16)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup)
pe = fluid.ParallelExecutor(
use_cuda=use_cuda,
loss_name=avg_cost.name,
build_strategy=build_strategy)
feeder = fluid.DataFeeder(
feed_list=[
word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate,
mark, target
],
place=fluid.CPUPlace())
data = train_data()
for i in xrange(10):
cur_batch = next(data)
print map(np.array,
pe.run(feed=feeder.feed(cur_batch),
fetch_list=[avg_cost.name]))[0]
def test_update_sparse_parameter_all_reduce(self):
build_strategy = fluid.BuildStrategy()
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce
self.check_network_convergence(
is_sparse=True, build_strategy=build_strategy, use_cuda=True)
self.check_network_convergence(
is_sparse=True, build_strategy=build_strategy, use_cuda=False)
def test_update_dense_parameter_all_reduce(self):
build_strategy = fluid.BuildStrategy()
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce
self.check_network_convergence(
is_sparse=False, build_strategy=build_strategy, use_cuda=True)
self.check_network_convergence(
is_sparse=False, build_strategy=build_strategy, use_cuda=False)
def test_update_sparse_parameter_reduce(self):
build_strategy = fluid.BuildStrategy()
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
self.check_network_convergence(
is_sparse=True, build_strategy=build_strategy, use_cuda=True)
self.check_network_convergence(
is_sparse=True, build_strategy=build_strategy, use_cuda=False)
def test_update_dense_parameter_reduce(self):
build_strategy = fluid.BuildStrategy()
build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce
self.check_network_convergence(
is_sparse=False, build_strategy=build_strategy, use_cuda=True)
self.check_network_convergence(
is_sparse=False, build_strategy=build_strategy, use_cuda=False)
if __name__ == '__main__':
unittest.main()
|
eevee/cocos2d-mirror | test/test_fadeout_layer.py | Python | bsd-3-clause | 802 | 0.028678 | # This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "s, t 1, s, t 2, s, t 3, s, t 4.1, s, t 4.2, s, q"
tags = "FadeIn, FadeOut, ColorLayer"
import pyglet
from pyglet.gl import *
import cocos
from cocos.director import director
from cocos.actions import *
from cocos.layer import *
def main():
director.init( resizable=True )
main_scene = cocos.scene.Scene()
l = ColorLayer( 255,128,64,64 )
main | _scene.add( l, z=0 )
l.do( FadeOut( duration=2) + FadeIn( duration=2) )
director.run (main_scene)
description = """
A ColorLayer is faded-out and fadded-in.
Notice this will not work for arbitrary Lay | er objects.
"""
if __name__ == '__main__':
main()
|
huyphan/pyyawhois | test/record/parser/test_response_whois_ax_status_available.py | Python | mit | 1,334 | 0.005247 |
# This file is autogenerated. Do not edit it | manually.
# If you want change the content of this file, edit
#
# spec/fixtures/responses/whois.ax/status_available
#
# and regenerate the tests with the following script
#
# $ scripts/generate_tests.py
#
from | nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisAxStatusAvailable(object):
def setUp(self):
fixture_path = "spec/fixtures/responses/whois.ax/status_available.txt"
host = "whois.ax"
part = yawhois.record.Part(open(fixture_path, "r").read(), host)
self.record = yawhois.record.Record(None, [part])
def test_status(self):
eq_(self.record.status, 'available')
def test_available(self):
eq_(self.record.available, True)
def test_nameservers(self):
eq_(self.record.nameservers.__class__.__name__, 'list')
eq_(self.record.nameservers, [])
def test_registered(self):
eq_(self.record.registered, False)
def test_created_on(self):
eq_(self.record.created_on, None)
def test_updated_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.updated_on)
def test_expires_on(self):
assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.expires_on)
|
benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/system/systementitytype_args.py | Python | apache-2.0 | 1,188 | 0.023569 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class systementitytype_args :
ur""" Provides additional arguments required for fetching the systeme | ntitytype resource.
"""
def __init__(self) :
self._datasource = ""
@property
def datasource(self) :
ur"""Specifies the source which contains all the stored counter values.
"""
try :
return self._datasource
except Exception as e:
raise e
@datasource.setter
def datasource(self, datasource) :
ur"""Specifies the source which contai | ns all the stored counter values.
"""
try :
self._datasource = datasource
except Exception as e:
raise e
|
lausser/coshsh | tests/recipes/test9/classes/os_linux.py | Python | agpl-3.0 | 1,048 | 0.009542 | import coshsh
from coshsh.application import Application
from coshsh.templaterule import TemplateRule
from coshsh.util import compare_attr
def __mi_ident__(params={}):
if coshsh.util.compare_attr("type", params, ".*red\s*hat.*|.*sles.*|.*l | inux.*|.*limux.*|.*debian.*|.*ubuntu.*|.*centos.*"):
return Linux
class Linux(coshsh.application.Application):
template_rules = [
coshsh.templaterule.TemplateRule(needsattr=None,
template="os_linux_default"),
coshsh.templaterule.TemplateRule(needsattr="filesystems",
template="os_linux_fs"),
]
def __new__(cls, params={}):
if compare_attr("version", params, ".*embedded.*"):
cls = EmbeddedLinux |
return object.__new__(cls)
def __init__(self, params):
self.test4_linux = True
class RedHat(Linux):
pass
class SuSE(Linux):
pass
class EmbeddedLinux(Linux):
template_rules = [
coshsh.templaterule.TemplateRule(needsattr=None,
template="os_linux_heartbeat"),
]
|
lokik/sfepy | sfepy/physics/schroedinger_app.py | Python | bsd-3-clause | 7,582 | 0.001715 | from __future__ import absolute_import
import os
import time
from math import pi
import numpy as nm
from sfepy.base.base import Struct, output, get_default
from sfepy.applications import PDESolverApp
from sfepy.solvers import Solver
from six.moves import range
def guess_n_eigs(n_electron, n_eigs=None):
"""
Guess the number of eigenvalues (energies) to compute so that the smearing
iteration converges. Passing n_eigs overrides the guess.
"""
if n_eigs is not None: return n_eigs
if n_electron > 2:
n_eigs = int(1.2 * ((0.5 * n_electron) + 5))
else:
n_eigs = n_electron
return n_eigs
class SchroedingerApp(PDESolverApp):
"""
Base application for electronic structure calculations.
Subclasses should typically override `solve_eigen_problem()` method.
This class allows solving only simple single electron problems,
e.g. well, oscillator, hydrogen atom and boron atom with 1 electron.
"""
@staticmethod
def process_options(options):
"""
Application options setup. Sets default values for missing
non-compulsory options.
Options:
save_eig_vectors : (from_largest, from_smallest) or None
If None, save all.
"""
get = options.get
n_electron = get('n_electron', 5)
n_eigs = guess_n_eigs(n_electron, n_eigs=get('n_eigs', None))
return Struct(eigen_solver=get('eigen_solver', None,
'missing "eigen_solver" in options!'),
n_electron=n_electron,
n_eigs=n_eigs,
save_eig_vectors=get('save_eig_vectors', None))
def __init__(self, conf, options, output_prefix, **kwargs):
PDESolverApp.__init__(self, conf, options, output_prefix,
init_equations=False)
def setup_options(self):
PDESolverApp.setup_options(self)
opts = SchroedingerApp.process_options(self.conf.options)
self.app_options += opts
def setup_output(self):
"""
Setup various file names for the output directory given by
`self.problem.output_dir`.
"""
output_dir = self.problem.output_dir
opts = self.app_options
opts.output_dir = output_dir
self.mesh_results_name = os.path.join(opts.output_dir,
self.problem.get_output_name())
self.eig_results_name = os.path.join(opts.output_dir,
self.problem.ofn_trunk
+ '_eigs.txt')
def call(self):
# This cannot be in __init__(), as parametric calls may change
# the output directory.
self.setup_output()
evp = self.solve_eigen_problem()
output("solution saved to %s" % self.problem.get_output_name())
output("in %s" % self.app_options.output_dir)
if self.post_process_hook_final is not None: # User postprocessing.
self.post_process_hook_final(self.problem, evp=evp)
return evp
def solve_eigen_problem(self):
options = self.options
opts = self.app_options
pb = self.problem
dim = pb.domain.mesh.dim
pb.set_equations(pb.conf.equations)
pb.time_update()
output('assembling lhs...')
tt = time.clock()
mtx_a = pb.evaluate(pb.conf.equations['lhs'], mode='weak',
auto_init=True, dw_mode='matrix')
output('...done in %.2f s' % (time.clock() - tt))
output('assembling rhs...')
tt = time.clock()
mtx_b = pb.evaluate(pb.conf.equations['rhs'], mode='weak',
dw_mode='matrix')
output('...done in %.2f s' % (time.clock() - tt))
n_eigs = get_default(opts.n_eigs, mtx_a.shape[0])
output('computing resonance frequencies...')
eig = Solver.any_from_conf(pb.get_solver_conf(opts.eigen_solver))
eigs, mtx_s_phi = eig(mtx_a, mtx_b, n_eigs, eigenvectors=True)
output('...done')
bounding_box = pb.domain.mesh.get_bounding_box()
# this assumes a box (3D), or a square (2D):
a = boundi | ng_box[1][0] - bounding_box[0][0]
E_exact = None
if options.hydrogen or options.boron:
if options.hydrogen:
Z = 1
elif options.boron:
Z = 5
if dim == 2:
E_exact = [-float(Z)**2/2/(n-0.5)**2/4
for n in [1]+[2]*3+[3]*5 + [4]*8 + [5]*15]
| elif dim == 3:
E_exact = [-float(Z)**2/2/n**2 for n in [1]+[2]*2**2+[3]*3**2 ]
if options.well:
if dim == 2:
E_exact = [pi**2/(2*a**2)*x
for x in [2, 5, 5, 8, 10, 10, 13, 13,
17, 17, 18, 20, 20 ] ]
elif dim == 3:
E_exact = [pi**2/(2*a**2)*x
for x in [3, 6, 6, 6, 9, 9, 9, 11, 11,
11, 12, 14, 14, 14, 14, 14,
14, 17, 17, 17] ]
if options.oscillator:
if dim == 2:
E_exact = [1] + [2]*2 + [3]*3 + [4]*4 + [5]*5 + [6]*6
elif dim == 3:
E_exact = [float(1)/2+x for x in [1]+[2]*3+[3]*6+[4]*10 ]
if E_exact is not None:
output("a=%f" % a)
output("Energies:")
output("n exact FEM error")
for i, e in enumerate(eigs):
from numpy import NaN
if i < len(E_exact):
exact = E_exact[i]
err = 100*abs((exact - e)/exact)
else:
exact = NaN
err = NaN
output("%d: %.8f %.8f %5.2f%%" % (i, exact, e, err))
else:
output(eigs)
mtx_phi = self.make_full(mtx_s_phi)
self.save_results(eigs, mtx_phi)
return Struct(pb=pb, eigs=eigs, mtx_phi=mtx_phi)
def make_full(self, mtx_s_phi):
variables = self.problem.get_variables()
mtx_phi = nm.empty((variables.di.ptr[-1], mtx_s_phi.shape[1]),
dtype=nm.float64)
for ii in range(mtx_s_phi.shape[1]):
mtx_phi[:,ii] = variables.make_full_vec(mtx_s_phi[:,ii])
return mtx_phi
def save_results(self, eigs, mtx_phi, out=None,
mesh_results_name=None, eig_results_name=None):
mesh_results_name = get_default(mesh_results_name,
self.mesh_results_name)
eig_results_name = get_default(eig_results_name,
self.eig_results_name)
pb = self.problem
save = self.app_options.save_eig_vectors
n_eigs = self.app_options.n_eigs
out = get_default(out, {})
state = pb.create_state()
aux = {}
for ii in range(eigs.shape[0]):
if save is not None:
if (ii > save[0]) and (ii < (n_eigs - save[1])): continue
state.set_full(mtx_phi[:,ii])
aux = state.create_output_dict()
key = list(aux.keys())[0]
out[key+'%03d' % ii] = aux[key]
if aux.get('__mesh__') is not None:
out['__mesh__'] = aux['__mesh__']
pb.save_state(mesh_results_name, out=out)
fd = open(eig_results_name, 'w')
eigs.tofile(fd, ' ')
fd.close()
|
antoinecarme/pyaf | tests/periodicities/Minute/Cycle_Minute_100_T_60.py | Python | bsd-3-clause | 82 | 0.04878 | impo | rt tests.periodicities.period_test as per
per.buildModel((60 , 'T' | , 100));
|
openstack/dragonflow | dragonflow/db/neutron/models.py | Python | apache-2.0 | 1,561 | 0 | # Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.db import model_base
import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql import expression
from sqlalchemy | import types
class utc_timestamp(expression.FunctionElement):
type = types.DateTime()
@compiles(utc_timestamp, 'postgresql')
def pg_utc_timestamp(element, compiler, **kw):
return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
@compiles(utc_timestamp, 'mssql')
def ms_utc_timestamp(element, compiler, **kw):
return "GETUTCDATE()"
@compiles(utc_timestamp, 'mysql')
def my_utc_timestamp(element, compiler, **kw):
return "UTC_TIMESTAMP()"
class DFLockedObjects(model_base.BASEV2) | :
__tablename__ = 'dflockedobjects'
object_uuid = sa.Column(sa.String(36), primary_key=True)
lock = sa.Column(sa.Boolean, default=False)
session_id = sa.Column(sa.BigInteger, default=0)
created_at = sa.Column(sa.DateTime, onupdate=utc_timestamp())
|
tensorflow/tensorflow | tensorflow/lite/schema/upgrade_schema.py | Python | apache-2.0 | 12,866 | 0.004819 | # ==============================================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Upgrade script to move from pre-release schema to new schema.
Usage examples:
bazel run tensorflow/lite/schema/upgrade_schema -- in.json out.json
bazel run tensorflow/lite/schema/upgrade_schema -- in.bin out.bin
bazel run tensorflow/lite/schema/upgrade_schema -- in.bin out.json
bazel run tensorflow/lite/schema/upgrade_schema -- in.json out.bin
bazel run tensorflow/lite/schema/upgrade_schema -- in.tflite out.tflite
"""
import argparse
import contextlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import tensorflow as tf
from tensorflow.python.platform import resource_loader
parser = argparse.ArgumentParser(
description="Script to move TFLite models from pre-release schema to "
"new s | chema.")
parser.add_argument(
"input",
type=str,
help="Input TensorFlow lite file in `.json`, `.bin` or `.tflite` format.")
parser.add_argument(
"output",
type=str,
help="Output json or bin TensorFlow lite model compliant with "
"the new schema. Extension must be `.json`, `.bin` or `.tflite`.")
# RAII Temporary Directory, because flatc doesn't allow d | irect use of tempfiles.
@contextlib.contextmanager
def TemporaryDirectoryResource():
temporary = tempfile.mkdtemp()
try:
yield temporary
finally:
shutil.rmtree(temporary)
class Converter(object):
"""Converts TensorFlow flatbuffer models from old to new version of schema.
This can convert between any version to the latest version. It uses
an incremental upgrade strategy to go from version to version.
Usage:
converter = Converter()
converter.Convert("a.tflite", "a.json")
converter.Convert("b.json", "b.tflite")
"""
def __init__(self):
# TODO(aselle): make this work in the open source version with better
# path.
paths_to_try = [
"../../../../flatbuffers/flatc", # not bazel
"../../../../external/flatbuffers/flatc" # bazel
]
for p in paths_to_try:
self._flatc_path = resource_loader.get_path_to_datafile(p)
if os.path.exists(self._flatc_path): break
def FindSchema(base_name):
return resource_loader.get_path_to_datafile("%s" % base_name)
# Supported schemas for upgrade.
self._schemas = [
(0, FindSchema("schema_v0.fbs"), True, self._Upgrade0To1),
(1, FindSchema("schema_v1.fbs"), True, self._Upgrade1To2),
(2, FindSchema("schema_v2.fbs"), True, self._Upgrade2To3),
(3, FindSchema("schema_v3.fbs"), False, None) # Non-callable by design.
]
# Ensure schemas are sorted, and extract latest version and upgrade
# dispatch function table.
self._schemas.sort()
self._new_version, self._new_schema = self._schemas[-1][:2]
self._upgrade_dispatch = {
version: dispatch
for version, unused1, unused2, dispatch in self._schemas}
def _Read(self, input_file, schema, raw_binary=False):
"""Read a tflite model assuming the given flatbuffer schema.
If `input_file` is in bin, then we must use flatc to convert the schema
from binary to json.
Args:
input_file: a binary (flatbuffer) or json file to read from. Extension
must be `.tflite`, `.bin`, or `.json` for FlatBuffer Binary or
FlatBuffer JSON.
schema: which schema to use for reading
raw_binary: whether to assume raw_binary (versions previous to v3)
that lacked file_identifier require this.
Raises:
RuntimeError: 1. When flatc cannot be invoked.
2. When json file does not exists.
ValueError: When the extension is not json or bin.
Returns:
A dictionary representing the read tflite model.
"""
raw_binary = ["--raw-binary"] if raw_binary else []
with TemporaryDirectoryResource() as tempdir:
basename = os.path.basename(input_file)
basename_no_extension, extension = os.path.splitext(basename)
if extension in [".bin", ".tflite"]:
# Convert to json using flatc
returncode = subprocess.call([
self._flatc_path,
"-t",
"--strict-json",
"--defaults-json",
] + raw_binary + ["-o", tempdir, schema, "--", input_file])
if returncode != 0:
raise RuntimeError("flatc failed to convert from binary to json.")
json_file = os.path.join(tempdir, basename_no_extension + ".json")
if not os.path.exists(json_file):
raise RuntimeError("Could not find %r" % json_file)
elif extension == ".json":
json_file = input_file
else:
raise ValueError("Invalid extension on input file %r" % input_file)
return json.load(open(json_file))
def _Write(self, data, output_file):
"""Output a json or bin version of the flatbuffer model.
Args:
data: Dict representing the TensorFlow Lite model to write.
output_file: filename to write the converted flatbuffer to. (json,
tflite, or bin extension is required).
Raises:
ValueError: When the extension is not json or bin
RuntimeError: When flatc fails to convert json data to binary.
"""
_, extension = os.path.splitext(output_file)
with TemporaryDirectoryResource() as tempdir:
if extension == ".json":
json.dump(data, open(output_file, "w"), sort_keys=True, indent=2)
elif extension in [".tflite", ".bin"]:
input_json = os.path.join(tempdir, "temp.json")
with open(input_json, "w") as fp:
json.dump(data, fp, sort_keys=True, indent=2)
returncode = subprocess.call([
self._flatc_path, "-b", "--defaults-json", "--strict-json", "-o",
tempdir, self._new_schema, input_json
])
if returncode != 0:
raise RuntimeError("flatc failed to convert upgraded json to binary.")
shutil.copy(os.path.join(tempdir, "temp.tflite"), output_file)
else:
raise ValueError("Invalid extension on output file %r" % output_file)
def _Upgrade0To1(self, data):
"""Upgrade data from Version 0 to Version 1.
Changes: Added subgraphs (which contains a subset of formally global
entries).
Args:
data: Dictionary representing the TensorFlow lite data to be upgraded.
This will be modified in-place to be an upgraded version.
"""
subgraph = {}
for key_to_promote in ["tensors", "operators", "inputs", "outputs"]:
subgraph[key_to_promote] = data[key_to_promote]
del data[key_to_promote]
data["subgraphs"] = [subgraph]
def _Upgrade1To2(self, data):
"""Upgrade data from Version 1 to Version 2.
Changes: Rename operators to Conform to NN API.
Args:
data: Dictionary representing the TensorFlow lite data to be upgraded.
This will be modified in-place to be an upgraded version.
Raises:
ValueError: Throws when model builtins are numeric rather than symbols.
"""
def RemapOperator(opcode_name):
"""Go from old schema op name to new schema op name.
Args:
opcode_name: String representing the ops (see :schema.fbs).
Returns:
Converted opcode_name from V1 to V2.
"""
old_name_to_new_name = {
"CONVOLUTION": "CONV_2D",
"DEPTHWISE_CONVOLUTION": "DEPTHWISE_CONV_2D",
"AVERAGE_POOL": "AVERAGE_POOL_2D",
"MAX_POOL": "MAX_POOL_2D",
"L2_POOL": "L2_POOL_2D",
"SIGMOID": "LOGISTIC",
"L2NORM": "L2_NORMALIZATION",
"LOCAL_RESPONSE_NORM": "LOCAL_RESPO |
ChatNode/PerpetualFailure | perpetualfailure/knowledgebase/views.py | Python | gpl-2.0 | 5,808 | 0.001377 | from pyramid.httpexceptions import (
HTTPException,
HTTPFound,
HTTPNotFound,
HTTPBadRequest,
HTTPConflict,
)
from pyramid.security import Authenticated
from pyramid.view import view_config
from perpetualfailure.db import session
from perpetualfailure.knowledgebase.models import (
KB_Article,
KB_ArticleRevision,
)
import copy
import logging
log = logging.getLogger(__name__)
def traverse(path_, page=None, parents=None):
    """Walk the article tree along ``path_`` and return ``(article, parents)``.

    Args:
        path_: list of article names, e.g. ``["index", "channels", "foo"]``.
            Empty segments are skipped, so ``["index", "", "foo"]`` equals
            ``["index", "foo"]``.  The caller's list is never mutated.
        page: article to start from; when None the first path segment is
            looked up among the root (parentless) articles.
        parents: accumulator of ancestor articles filled during descent.

    Returns:
        ``(page, parents)``; ``page`` is None when the path does not resolve.
    """
    # Always work on a copy: the original only copied when `page` was None,
    # so external callers that supplied `page` had their list emptied.
    path = list(path_)
    if page is None:
        node = path.pop(0)
        # `== None` is intentional: SQLAlchemy translates it to IS NULL.
        page = session.query(KB_Article).filter(
            KB_Article.parent == None, KB_Article.name == node).first()
        # NOTE(review): if this lookup fails and more segments remain,
        # `page.children` below raises AttributeError (pre-existing).
    if not parents:
        parents = []
    # Remove empty elements from the path
    # Lets us do stuff like /kb////channels//// == /kb/channels
    while path and not path[0]:
        path.pop(0)
    # The path list is empty; we've reached the article we wanted.
    if not path:
        return (page, parents)
    # Descend into the child whose name matches the next path node.
    node = path.pop(0)
    matches = [article for article in page.children if article.name == node]
    if not matches:
        return (None, parents)
    parents.append(page)
    return traverse(path, matches[0], parents)
@view_config(
    route_name='knowledgebase.article.view',
    renderer='knowledgebase/article/view.mako',
)
def viewArticle(request):
    """Render a knowledge-base article located by its URL path."""
    path = request.matchdict['path']
    # Empty path or bare "/" means the knowledge-base index page.
    if not path or path == "/":
        path = [""]
    else:
        path = path.split("/")
    # The index should always be at the first index in the path
    path[0] = "index"
    # Find the article by traversing the article tree down to the article
    # we want.
    (article, parents) = traverse(path)
    if not article:
        return HTTPNotFound()
    # NOTE(review): raw SQL built with %-interpolation; article.id comes
    # from the ORM as an integer so this is not directly injectable, but a
    # bound parameter would be safer -- TODO confirm and parameterize.
    revision_count = session.execute("select count(id) from knowledgebase_article_revision where article_id = %i;" % article.id).fetchall()[0][0]
    # Hand the article, its ancestors and the revision count to the template.
    return {"article": article, "parents": parents, "revisions": revision_count}
@view_config(
    route_name='knowledgebase.article.create',
    renderer='knowledgebase/article/edit.mako',
    # TODO: Add a factory and use the "create" permission.
    permission=Authenticated,
)
def createArticle(request):
    """Create a new article at the route's URL path.

    Returns 404 when a nested article has no existing parent, 409 when an
    article already exists at the path; otherwise delegates validation and
    persistence to articleUpdate().
    """
    article = KB_Article()
    # Build the tree path from the route: drop empty segments, root at index.
    segments = [node for node in request.matchdict['path'].split("/") if node]
    segments.insert(0, "index")
    if len(segments) > 1:
        # A nested article requires an existing parent node.
        if not traverse(segments[:-1])[0]:
            return HTTPNotFound()
    if traverse(segments)[0]:
        # The target path must not already hold an article.
        return HTTPConflict()
    outcome = articleUpdate(request, article, segments)
    if isinstance(outcome, HTTPException):
        return outcome
    return {"article": article}
@view_config(
    route_name='knowledgebase.article.edit',
    renderer='knowledgebase/article/edit.mako',
    # TODO: Add a factory and use the "edit" permission.
    permission=Authenticated,
)
def editArticle(request):
    """Edit an existing article located by its URL path.

    Returns 404 when the path does not resolve to an article; on POST the
    update is validated by articleUpdate(), which redirects back to the
    article view on success.
    """
    # Construct a list from the path given in the route URL.
    path = [node for node in request.matchdict['path'].split("/") if node]
    path.insert(0, "index")
    article = traverse(path)[0]
    if not article:
        return HTTPNotFound()
    # Validate data and if appropriate update and redirect.
    r = articleUpdate(request, article, path)
    if isinstance(r, HTTPException):
        return r
    return {"article": article}
def articleUpdate(request, article, path, is_new=False):
    """Apply a POSTed title/content to ``article`` and record a revision.

    Returns None for non-POST requests, HTTPBadRequest when a form field
    is missing, and an HTTPFound redirect to the article view on success.
    """
    if request.method != "POST":
        return None
    if any(field not in request.POST for field in ('title', 'content')):
        return HTTPBadRequest()
    article.title = request.POST['title']
    article.name = path[-1]
    article.content = request.POST['content']
    if len(path) > 1:
        # Nested article: (re)attach it to the node one level up.
        article.parent = traverse(path[:-1])[0]
    elif article.parent:
        # Root article with a stale parent: detach it.
        article.parent = None
    # Chain a new revision onto the article's history.
    new_revision = KB_ArticleRevision(article)
    old_revision = article.revision
    if old_revision:
        old_revision.children.append(new_revision)
        session.add(old_revision)
    session.add(new_revision)
    article.revision = new_revision
    session.add(article)
    return HTTPFound(location=request.route_path(
        'knowledgebase.article.view', path=request.matchdict['path']))
@view_config(
    route_name='knowledgebase.revision.compare',
    renderer='knowledgebase/revision/compare.mako',
)
def compareRevisions(request):
    """Render a unified diff between two article revisions.

    Returns 404 when either revision id in the route does not exist (the
    original crashed with AttributeError dereferencing a None revision).
    """
    # Local import: difflib was never imported at module level, so this
    # view raised NameError on every request.
    import difflib
    base = getRevisionFromMatchdict(request, "base")
    head = getRevisionFromMatchdict(request, "head")
    if base is None or head is None:
        return HTTPNotFound()
    baseText = base.content.split("\n")
    headText = head.content.split("\n")
    baseFile = "article/%s/revision/%s" % (base.article.id, base.id)
    headFile = "article/%s/revision/%s" % (head.article.id, head.id)
    diff = "\n".join(difflib.unified_diff(baseText, headText, baseFile, headFile))
    return {"raw_diff": diff, "base": base, "head": head, "baseFile": baseFile,
            "headFile": headFile}
def getRevisionFromMatchdict(request, key):
    """Return the KB_ArticleRevision whose id is stored under ``key`` in
    the route matchdict, or None when no such revision exists."""
    revision_id = request.matchdict[key]
    query = session.query(KB_ArticleRevision).filter(
        KB_ArticleRevision.id == revision_id)
    return query.first()
|
s1na/darkoob | darkoob/book/ajax.py | Python | mit | 2,980 | 0.00604 | from django.utils import simplejson
from dajaxice.decorators import dajaxice_register
from django.utils.translation import ugettext as _
from django.template.loader import render_to_string
from dajax.core import Dajax
from django.db import transaction
from darkoob.book.models import Book, Review
@dajaxice_register(method='POST')
@transaction.commit_manually
def rate(request, rate, book_id):
    """Record a user's score for a book; returns ``{'done': bool}`` JSON.

    The transaction is managed manually: committed on success, rolled back
    on any failure.  The original referenced an undefined ``errors`` list
    inside the except block, which raised NameError and skipped the
    rollback entirely.
    """
    done = False
    try:
        book = Book.objects.get(id=book_id)
        book.rating.add(score=rate, user=request.user,
                        ip_address=request.META['REMOTE_ADDR'])
    except Exception:
        # Narrowed from a bare `except:`; roll back the dangling transaction.
        transaction.rollback()
    else:
        done = True
        transaction.commit()
    return simplejson.dumps({'done': done})
@dajaxice_register(method='POST')
@transaction.commit_manually
def review_rate(request, rate, review_id):
    """Record a user's score for a review; returns ``{'done': bool}`` JSON.

    Mirrors rate(): manual transaction, commit on success, rollback on
    failure.  Fixes the undefined ``errors`` NameError in the original
    except block and drops a leftover debug print.
    """
    done = False
    try:
        review = Review.objects.get(id=review_id)
        review.rating.add(score=rate, user=request.user,
                          ip_address=request.META['REMOTE_ADDR'])
    except Exception:
        transaction.rollback()
    else:
        done = True
        transaction.commit()
    return simplejson.dumps({'done': done})
@dajaxice_register(method='POST')
def submit_review(request, book_id, title, text):
    """Create a review for a book and push the result to the page via Dajax.

    Validation: the book must exist and the review text must be at least
    200 characters; otherwise an error notification is rendered and the
    form fields are cleared.
    """
    dajax = Dajax()
    #TODO: checks if you have permission for posting review
    try:
        book = Book.objects.get(id=book_id)
    except Book.DoesNotExist:
        dajax.script('''
            $.pnotify({
                title: 'Review',
                type:'error',
                text: 'This Book doesn\'t exsist.',
                opacity: .8
            });
            $('#id_text').val('');
            $('#id_title').val('');
        ''')
    else:
        if len(text) < 200:
            # NOTE(review): this view is not under @transaction.commit_manually,
            # so this rollback call looks misplaced -- confirm it does not
            # raise TransactionManagementError under the deployed Django.
            transaction.rollback()
            dajax.script('''
                $.pnotify({
                    title: 'Review',
                    type:'error',
                    text: 'Complete your review. We need some checks',
                    opacity: .8
                });
                $('#id_text').val('');
                $('#id_title').val('');
            ''')
        else:
            review = Review.objects.create(book=book, user=request.user, title=title, text=text)
            t_rendered = render_to_string('book/review.html', {'review': review})
            dajax.prepend('#id_new_post_position', 'innerHTML', t_rendered)
            dajax.script('''
                $.pnotify({
                    title: 'Review',
                    type:'success',
                    text: 'Your review record',
                    opacity: .8
                });
                $('#id_text').val('');
                $('#id_title').val('');
            ''')
    return dajax.json()
@dajaxice_register(method='POST')
def ha(request, book_name):
    # NOTE(review): debug/scratch endpoint -- echoes its argument to stdout
    # and always reports success; presumably left over from development.
    print "book_name", book_name
    return simplejson.dumps({'done': True})
|
gnunixon/sport-news-writer | writer/game/migrations/0013_auto_20150303_1108.py | Python | gpl-3.0 | 847 | 0.001181 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Add the Team.lider / Team.loser flags and refresh Game.pub_date's
    default (a fixed timestamp baked in by makemigrations at generation
    time -- harmless but typical of auto-generated migrations)."""

    dependencies = [
        ('game', '0012_auto_20150301_2207'),
    ]

    operations = [
        migrations.AddField(
            model_name='team',
            name='lider',
            field=models.NullBooleanField(),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='team',
            name='loser',
            field=models.NullBooleanField(),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='game',
            name='pub_date',
            field=models.DateField(default=datetime.datetime(2015, 3, 3, 11, 8, 11, 345625)),
            preserve_default=True,
        ),
    ]
|
Datafable/gbif-data-licenses | code/unique_licenses.py | Python | mit | 2,877 | 0.030587 | #!/usr/bin/python
import sys
import csv
import re
import os
def check_arguments():
    """Validate the command line and return [input_file, output_file].

    Exits with status -1 after printing usage when the argument count is
    wrong.  All print() calls take a single argument so the module works
    unchanged on both Python 2 and Python 3.
    """
    if len(sys.argv) != 3:
        print("usage: ./unique_licenses.py <input file> <output file>")
        print("  The input file should contain two fields: <dataset id> and <license>")
        print("  This script will read in the input, fetch all licenses and compile a")
        print("  unique list of them. The licenses will be printed out in Markdown that")
        print("  allows manual annotation. As generating a file and then manually editing")
        print("  the result can be tricky (because a second run could overwrite manual")
        print("  work), this script will first check for the presence of licenses in the")
        print("  outputfile. It will only append new licenses to the outputfile.")
        sys.exit(-1)
    return sys.argv[1:]
def get_unique_input_licenses(infile_name):
    """Read the input CSV and return its licenses, unique, in first-seen order.

    The file must have a header row and four columns:
    <dataset key>, <owner key>, <number of occurrences>, <license>.
    """
    input_licenses = []
    # `with` closes the handle deterministically (the original leaked it),
    # and next() replaces the Python-2-only .next() method.
    with open(infile_name) as infile:
        csvreader = csv.reader(infile)
        next(csvreader)  # skip the header row
        for row in csvreader:
            ds_key, ds_owner_key, occurrence_count, lic = row
            if lic not in input_licenses:
                input_licenses.append(lic)
    print("number of unique licenses: {0}".format(len(input_licenses)))
    return input_licenses
def get_known_licenses(outfile_name):
    """Return the licenses already recorded in the output file (column 9).

    Returns an empty list when the file does not exist or is empty, so a
    first run starts from scratch instead of crashing on a missing header.
    """
    known_licenses = []
    if os.path.exists(outfile_name):
        with open(outfile_name) as f:
            csvreader = csv.reader(f)
            try:
                next(csvreader)  # skip the header row (py2's .next() replaced)
            except StopIteration:
                return known_licenses  # empty file: nothing known yet
            for license_line in csvreader:
                known_licenses.append(license_line[8])
    return known_licenses
def find_new_licenses(in_licenses, known_licenses):
    """Return the input licenses that are not yet in ``known_licenses``,
    preserving their original order."""
    return [lic for lic in in_licenses if lic not in known_licenses]
def write_outfile_header(outfile_name):
    """(Re)create the output file containing only the annotation header row."""
    header = ['standard license', 'use', 'distribution', 'derivatives',
              'commercial', 'attribution', 'share alike', 'notification',
              'license']
    with open(outfile_name, 'w') as f:
        csv.writer(f, lineterminator='\n').writerow(header)
def append_licenses(licenses_to_append, outfile_name):
    """Append one annotation row per license, leaving the eight
    classification columns blank for manual editing."""
    with open(outfile_name, 'a') as f:
        outwriter = csv.writer(f, lineterminator='\n')
        outwriter.writerows([''] * 8 + [lic] for lic in licenses_to_append)
def main():
    """Drive the script: read licenses from the input file, ensure the
    output header exists, and append any licenses not yet documented."""
    infile_name, outfile_name = check_arguments()
    in_licenses = get_unique_input_licenses(infile_name)
    known_licenses = get_known_licenses(outfile_name)
    if not known_licenses:
        print('No licenses written in the provided output file.')
        print('Creating the header of the outputfile now.')
        write_outfile_header(outfile_name)
    new_licenses = find_new_licenses(in_licenses, known_licenses)
    if new_licenses:
        print('Appending new licenses to the outputfile.')
        append_licenses(new_licenses, outfile_name)
    else:
        print('All licenses of the input file were already documented')
        print('in the provided output file.')


if __name__ == "__main__":
    # Guard the entry point so importing this module has no side effects.
    main()
|
hach-que/systemd-packaged | src/python-systemd/journal.py | Python | gpl-2.0 | 20,444 | 0.002397 | # -*- Mode: python; coding:utf-8; indent-tabs-mode: nil -*- */
#
# This file is part of systemd.
#
# Copyright 2012 David Strauss <david@davidstrauss.net>
# Copyright 2012 Zbigniew Jędrzejewski-Szmek <zbyszek@in.waw.pl>
# Copyright 2012 Marti Raudsepp <marti@juffo.org>
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with systemd; If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import sys as _sys
import datetime as _datetime
import uuid as _uuid
import traceback as _traceback
import os as _os
import logging as _logging
if _sys.version_info >= (3,3):
from collections import ChainMap as _ChainMap
from syslog import (LOG_EMERG, LOG_ALERT, LOG_CRIT, LOG_ERR,
LOG_WARNING, LOG_NOTICE, LOG_INFO, LOG_DEBUG)
from ._journal import __version__, sendv, stream_fd
from ._reader import (_Reader, NOP, APPEND, INVALIDATE,
LOCAL_ONLY, RUNTIME_ONLY,
SYSTEM, SYSTEM_ONLY, CURRENT_USER,
_get_catalog)
from . import id128 as _id128
if _sys.version_info >= (3,):
from ._reader import Monotonic
else:
Monotonic = tuple
def _convert_monotonic(m):
    """Convert (microseconds, boot_id bytes) into a Monotonic pair."""
    delta = _datetime.timedelta(microseconds=m[0])
    boot_id = _uuid.UUID(bytes=m[1])
    return Monotonic((delta, boot_id))
def _convert_source_monotonic(s):
return _datetime.timedelta(microseconds=int(s))
def _convert_realtime(t):
    # `t` is integer microseconds since the epoch; true division (see the
    # __future__ import at the top of the file) keeps sub-second precision.
    return _datetime.datetime.fromtimestamp(t / 1000000)
def _convert_timestamp(s):
return _datetime.datetime.fromtimestamp(int(s) / 1000000)
def _convert_trivial(x):
    # Identity conversion for fields that need no decoding (e.g. __CURSOR).
    return x
if _sys.version_info >= (3,):
    def _convert_uuid(s):
        # Journal values arrive as bytes under Python 3; uuid.UUID wants str.
        return _uuid.UUID(s.decode())
else:
    # Python 2 str is already bytes, which uuid.UUID accepts directly.
    _convert_uuid = _uuid.UUID
# Maps journal field names to converters applied by Reader._convert_field;
# fields absent here fall back to plain UTF-8 decoding (raw bytes are kept
# when a conversion raises ValueError).
DEFAULT_CONVERTERS = {
    'MESSAGE_ID': _convert_uuid,
    '_MACHINE_ID': _convert_uuid,
    '_BOOT_ID': _convert_uuid,
    'PRIORITY': int,
    'LEADER': int,
    'SESSION_ID': int,
    'USERSPACE_USEC': int,
    'INITRD_USEC': int,
    'KERNEL_USEC': int,
    '_UID': int,
    '_GID': int,
    '_PID': int,
    'SYSLOG_FACILITY': int,
    'SYSLOG_PID': int,
    '_AUDIT_SESSION': int,
    '_AUDIT_LOGINUID': int,
    '_SYSTEMD_SESSION': int,
    '_SYSTEMD_OWNER_UID': int,
    'CODE_LINE': int,
    'ERRNO': int,
    'EXIT_STATUS': int,
    '_SOURCE_REALTIME_TIMESTAMP': _convert_timestamp,
    '__REALTIME_TIMESTAMP': _convert_realtime,
    '_SOURCE_MONOTONIC_TIMESTAMP': _convert_source_monotonic,
    '__MONOTONIC_TIMESTAMP': _convert_monotonic,
    '__CURSOR': _convert_trivial,
    'COREDUMP': bytes,
    'COREDUMP_PID': int,
    'COREDUMP_UID': int,
    'COREDUMP_GID': int,
    'COREDUMP_SESSION': int,
    'COREDUMP_SIGNAL': int,
    'COREDUMP_TIMESTAMP': _convert_timestamp,
}
_IDENT_LETTER = set('ABCDEFGHIJKLMNOPQRTSUVWXYZ_')
def _valid_field_name(s):
return not (set(s) - _IDENT_LETTER)
class Reader(_Reader):
"""Reader allows the access and filtering of systemd journal
entries. Note that in order to access the system journal, a
non-root user must be in the `systemd-journal` group.
Example usage to print out all informational or higher level
messages for systemd-udevd for this boot:
>>> j = journal.Reader()
>>> j.this_boot()
>>> j.log_level(journal.LOG_INFO)
>>> j.add_match(_SYSTEMD_UNIT="systemd-udevd.service")
>>> for entry in j:
... print(entry['MESSAGE'])
See systemd.journal-fields(7) for more info on typical fields
found in the journal.
"""
def __init__(self, flags=0, path=None, files=None, converters=None):
"""Create an instance of Reader, which allows filtering and
return of journal entries.
Argument `flags` sets open flags of the journal, which can be one
of, or ORed combination of constants: LOCAL_ONLY (default) opens
journal on local machine only; RUNTIME_ONLY opens only
volatile journal files; and SYSTEM_ONLY opens only
journal files of system services and the kernel.
Argument `path` is the directory of journal files. Note that
`flags` and `path` are exclusive.
Argument `converters` is a dictionary which updates the
DEFAULT_CONVERTERS to convert journal field values. Field
names are used as keys into this dictionary. The values must
be single argument functions, which take a `bytes` object and
return a converted value. When there's no entry for a field
name, then the default UTF-8 decoding will be attempted. If
the conversion fails with a ValueError, unconverted bytes
object will be returned. (Note that ValueEror is a superclass
of UnicodeDecodeError).
Reader implements the context manager protocol: the j | ournal
will be closed when exiting the block.
"""
super(Reader, self).__init__(flags, path, files)
if _sys.version_info >= (3,3):
self.converters = _ChainMap()
if converters is not None:
self.converters.maps.append(converters)
self.converters.maps.append(DEFAULT_CONVERTERS)
else:
self.converters = DEFAULT_CONVERTERS.copy()
if converters | is not None:
self.converters.update(converters)
def _convert_field(self, key, value):
"""Convert value using self.converters[key]
If `key` is not present in self.converters, a standard unicode
decoding will be attempted. If the conversion (either
key-specific or the default one) fails with a ValueError, the
original bytes object will be returned.
"""
convert = self.converters.get(key, bytes.decode)
try:
return convert(value)
except ValueError:
# Leave in default bytes
return value
def _convert_entry(self, entry):
"""Convert entire journal entry utilising _covert_field"""
result = {}
for key, value in entry.items():
if isinstance(value, list):
result[key] = [self._convert_field(key, val) for val in value]
else:
result[key] = self._convert_field(key, value)
return result
    def __iter__(self):
        """Part of iterator protocol.

        Returns self; each subsequent __next__/get_next call advances
        the journal cursor.
        """
        return self
def __next__(self):
"""Part of iterator protocol.
Returns self.get_next() or raises StopIteration.
"""
ans = self.get_next()
if ans:
return ans
else:
raise StopIteration()
if _sys.version_info < (3,):
next = __next__
def add_match(self, *args, **kwargs):
"""Add one or more matches to the filter journal log entries.
All matches of different field are combined in a logical AND,
and matches of the same field are automatically combined in a
logical OR.
Matches can be passed as strings of form "FIELD=value", or
keyword arguments FIELD="value".
"""
args = list(args)
args.extend(_make_line(key, val) for key, val in kwargs.items())
for arg in args:
super(Reader, self).add_match(arg)
def get_next(self, skip=1):
"""Return the next log entry as a mapping type, currently
a standard dictionary of fields.
Optional skip value will return the `skip`\-th log entry.
Entries will be processed with converters specified during
Reader creation.
"""
if super(Reader, self)._next(skip):
entry = super(Reader, self)._get_all()
if entry:
entry['__REALTIME_TIMESTAMP'] = self._ge |
esikachev/sahara-backup | sahara/db/sqlalchemy/models.py | Python | apache-2.0 | 15,416 | 0.000065 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
import six
import sqlalchemy as sa
from sqlalchemy.orm import relationship
from sahara.db.sqlalchemy import model_base as mb
from sahara.db.sqlalchemy import types as st
# Helpers
def _generate_unicode_uuid():
    """Return a random UUID4 rendered as a text (unicode) string."""
    new_id = uuid.uuid4()
    # six.text_type is unicode on Python 2 and str on Python 3.
    return six.text_type(new_id)
def _id_column():
    # Standard 36-char UUID-string primary key shared by all models below.
    return sa.Column(sa.String(36),
                     primary_key=True,
                     default=_generate_unicode_uuid)
# Main objects: Cluster, NodeGroup, Instance
class Cluster(mb.SaharaBase):
    """Contains all info about cluster.

    Names are unique per tenant.  This revision only removes stray '|'
    artifacts that corrupted the hadoop_version and anti_affinity column
    definitions; the schema itself is unchanged.
    """
    __tablename__ = 'clusters'
    __table_args__ = (
        sa.UniqueConstraint('name', 'tenant_id'),
    )
    id = _id_column()
    name = sa.Column(sa.String(80), nullable=False)
    description = sa.Column(sa.Text)
    tenant_id = sa.Column(sa.String(36))
    trust_id = sa.Column(sa.String(36))
    is_transient = sa.Column(sa.Boolean, default=False)
    plugin_name = sa.Column(sa.String(80), nullable=False)
    hadoop_version = sa.Column(sa.String(80), nullable=False)
    cluster_configs = sa.Column(st.JsonDictType())
    default_image_id = sa.Column(sa.String(36))
    neutron_management_network = sa.Column(sa.String(36))
    anti_affinity = sa.Column(st.JsonListType())
    management_private_key = sa.Column(sa.Text, nullable=False)
    management_public_key = sa.Column(sa.Text, nullable=False)
    user_keypair_id = sa.Column(sa.String(80))
    status = sa.Column(sa.String(80))
    status_description = sa.Column(st.LongText())
    info = sa.Column(st.JsonDictType())
    extra = sa.Column(st.JsonDictType())
    rollback_info = sa.Column(st.JsonDictType())
    sahara_info = sa.Column(st.JsonDictType())
    provision_progress = relationship('ClusterProvisionStep',
                                      cascade="all,delete",
                                      backref='cluster',
                                      lazy='joined')
    node_groups = relationship('NodeGroup', cascade="all,delete",
                               backref='cluster', lazy='joined')
    cluster_template_id = sa.Column(sa.String(36),
                                    sa.ForeignKey('cluster_templates.id'))
    cluster_template = relationship('ClusterTemplate',
                                    backref="clusters", lazy='joined')

    def to_dict(self, show_progress=False):
        """Serialize the cluster, expanding node groups and provisioning
        steps into plain dicts."""
        d = super(Cluster, self).to_dict()
        d['node_groups'] = [ng.to_dict() for ng in self.node_groups]
        d['provision_progress'] = [pp.to_dict(show_progress) for pp in
                                   self.provision_progress]
        return d
class NodeGroup(mb.SaharaBase):
    """Specifies group of nodes within a cluster."""
    __tablename__ = 'node_groups'
    __table_args__ = (
        sa.UniqueConstraint('name', 'cluster_id'),
    )
    id = _id_column()
    name = sa.Column(sa.String(80), nullable=False)
    tenant_id = sa.Column(sa.String(36))
    flavor_id = sa.Column(sa.String(36), nullable=False)
    image_id = sa.Column(sa.String(36))
    image_username = sa.Column(sa.String(36))
    node_processes = sa.Column(st.JsonListType())
    node_configs = sa.Column(st.JsonDictType())
    volumes_per_node = sa.Column(sa.Integer)
    volumes_size = sa.Column(sa.Integer)
    volumes_availability_zone = sa.Column(sa.String(255))
    volume_mount_prefix = sa.Column(sa.String(80))
    volume_type = sa.Column(sa.String(255))
    # Number of instances this group should contain.
    count = sa.Column(sa.Integer, nullable=False)
    instances = relationship('Instance', cascade="all,delete",
                             backref='node_group',
                             order_by="Instance.instance_name", lazy='joined')
    cluster_id = sa.Column(sa.String(36), sa.ForeignKey('clusters.id'))
    node_group_template_id = sa.Column(sa.String(36),
                                       sa.ForeignKey(
                                           'node_group_templates.id'))
    node_group_template = relationship('NodeGroupTemplate',
                                       backref="node_groups", lazy='joined')
    floating_ip_pool = sa.Column(sa.String(36))
    security_groups = sa.Column(st.JsonListType())
    auto_security_group = sa.Column(sa.Boolean())
    availability_zone = sa.Column(sa.String(255))
    open_ports = sa.Column(st.JsonListType())
    is_proxy_gateway = sa.Column(sa.Boolean())
    volume_local_to_instance = sa.Column(sa.Boolean())
    def to_dict(self):
        """Serialize the node group, expanding its instances into dicts."""
        d = super(NodeGroup, self).to_dict()
        d['instances'] = [i.to_dict() for i in self.instances]
        return d
class Instance(mb.SaharaBase):
    """An OpenStack instance created for the cluster."""
    __tablename__ = 'instances'
    __table_args__ = (
        sa.UniqueConstraint('instance_id', 'node_group_id'),
    )
    id = _id_column()
    tenant_id = sa.Column(sa.String(36))
    node_group_id = sa.Column(sa.String(36), sa.ForeignKey('node_groups.id'))
    # Nova-side instance UUID (distinct from this row's own `id`).
    instance_id = sa.Column(sa.String(36))
    instance_name = sa.Column(sa.String(80), nullable=False)
    # NOTE(review): String(15) only fits dotted-quad IPv4 addresses --
    # IPv6 would not fit; confirm whether that is intentional.
    internal_ip = sa.Column(sa.String(15))
    management_ip = sa.Column(sa.String(15))
    volumes = sa.Column(st.JsonListType())
# Template objects: ClusterTemplate, NodeGroupTemplate, TemplatesRelation
class ClusterTemplate(mb.SaharaBase):
    """Template for Cluster."""
    __tablename__ = 'cluster_templates'
    __table_args__ = (
        sa.UniqueConstraint('name', 'tenant_id'),
    )
    id = _id_column()
    name = sa.Column(sa.String(80), nullable=False)
    description = sa.Column(sa.Text)
    cluster_configs = sa.Column(st.JsonDictType())
    default_image_id = sa.Column(sa.String(36))
    anti_affinity = sa.Column(st.JsonListType())
    tenant_id = sa.Column(sa.String(36))
    neutron_management_network = sa.Column(sa.String(36))
    plugin_name = sa.Column(sa.String(80), nullable=False)
    hadoop_version = sa.Column(sa.String(80), nullable=False)
    # Node-group membership is modelled through TemplatesRelation rows.
    node_groups = relationship('TemplatesRelation', cascade="all,delete",
                               backref='cluster_template', lazy='joined')
    is_default = sa.Column(sa.Boolean(), default=False)
    def to_dict(self):
        """Serialize the template, expanding node-group relations."""
        d = super(ClusterTemplate, self).to_dict()
        d['node_groups'] = [tr.to_dict() for tr in
                            self.node_groups]
        return d
class NodeGroupTemplate(mb.SaharaBase):
    """Template for NodeGroup."""
    # Columns intentionally mirror NodeGroup (minus per-cluster state).
    __tablename__ = 'node_group_templates'
    __table_args__ = (
        sa.UniqueConstraint('name', 'tenant_id'),
    )
    id = _id_column()
    name = sa.Column(sa.String(80), nullable=False)
    description = sa.Column(sa.Text)
    tenant_id = sa.Column(sa.String(36))
    flavor_id = sa.Column(sa.String(36), nullable=False)
    image_id = sa.Column(sa.String(36))
    plugin_name = sa.Column(sa.String(80), nullable=False)
    hadoop_version = sa.Column(sa.String(80), nullable=False)
    node_processes = sa.Column(st.JsonListType())
    node_configs = sa.Column(st.JsonDictType())
    volumes_per_node = sa.Column(sa.Integer, nullable=False)
    volumes_size = sa.Column(sa.Integer)
    volumes_availability_zone = sa.Column(sa.String(255))
    volume_mount_prefix = sa.Column(sa.String(80))
    volume_type = sa.Column(sa.String(255))
    floating_ip_pool = sa.Column(sa.String(36))
    security_groups = sa.Column(st.JsonListType())
    auto_security_group = sa.Column(sa.Boolean())
    availability_zone = sa.Column(sa.String(255))
    is_proxy_gateway = sa.Column(sa.Boolean())
    volume_local_to_instance = sa.Column(sa.Boolean())
    is_default = sa.Column(sa.Boolean(), default=False)
class TemplatesRelation(mb.SaharaBase):
"""NodeGroupTemplate - Cluste |
flowsha/zhwh | web/com/zhwh/controllers/error.py | Python | apache-2.0 | 264 | 0.026515 | # -*- coding: utf-8 -*-
#
#Copyright 2013 Xiangxiang Telecom Cor | poratio | n.
#@author: liusha
"""This module contains implementations of error action.
"""
#import web
class ErrorAction:
def GET(self):
return "You are accessing valid url."
#end |
vipulroxx/sympy | sympy/concrete/expr_with_limits.py | Python | bsd-3-clause | 14,916 | 0.002078 | from __future__ import print_function, division
from sympy.core.add import Add
from sympy.core.expr import Expr
from sympy.core.mul import Mul
from sympy.core.relational import Equality
from sympy.sets.sets import Interval
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.core.compatibility import is_sequence, range
from sympy.core.containers import Tuple
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.utilities import flatten
from sympy.utilities.iterables import sift
from sympy.matrices import Matrix
def _process_limits(*symbols):
    """Process the list of symbols and convert them to canonical limits,
    storing them as Tuple(symbol, lower, upper). The orientation of
    the function is also returned when the upper limit is missing
    so (x, 1, None) becomes (x, None, 1) and the orientation is changed.
    """
    limits = []
    orientation = 1
    for V in symbols:
        if isinstance(V, Symbol):
            # Bare symbol: no bounds at all.
            limits.append(Tuple(V))
            continue
        elif is_sequence(V, Tuple):
            V = sympify(flatten(V))
            if V[0].is_Symbol:
                newsymbol = V[0]
                # (x, Interval(a, b)) is shorthand for (x, a, b).
                if len(V) == 2 and isinstance(V[1], Interval):
                    V[1:] = [V[1].start, V[1].end]
                if len(V) == 3:
                    if V[1] is None and V[2] is not None:
                        # Only the upper bound given: (x, None, b) -> (x, b).
                        nlim = [V[2]]
                    elif V[1] is not None and V[2] is None:
                        # Only the lower bound given: treat it as the upper
                        # bound and flip the orientation of the expression.
                        orientation *= -1
                        nlim = [V[1]]
                    elif V[1] is None and V[2] is None:
                        nlim = []
                    else:
                        nlim = V[1:]
                    limits.append(Tuple(newsymbol, *nlim ))
                    continue
                elif len(V) == 1 or (len(V) == 2 and V[1] is None):
                    # (x,) or (x, None): symbol without bounds.
                    limits.append(Tuple(newsymbol))
                    continue
                elif len(V) == 2:
                    # (x, b): single bound kept as-is.
                    limits.append(Tuple(newsymbol, V[1]))
                    continue
        raise ValueError('Invalid limits given: %s' % str(symbols))
    return limits, orientation
class ExprWithLimits(Expr):
__slots__ = ['is_commutative']
    def __new__(cls, function, *symbols, **assumptions):
        """Build the limits expression, normalizing the function and limits.

        Equalities are mapped over both sides; nested instances of the
        same class are flattened into a single limit list.
        """
        # Any embedded piecewise functions need to be brought out to the
        # top level so that integration can go into piecewise mode at the
        # earliest possible moment.
        function = sympify(function)
        if hasattr(function, 'func') and function.func is Equality:
            # Apply the operation to each side of the equation separately.
            lhs = function.lhs
            rhs = function.rhs
            return Equality(cls(lhs, *symbols, **assumptions), \
                cls(rhs, *symbols, **assumptions))
        function = piecewise_fold(function)
        if function is S.NaN:
            return S.NaN
        if symbols:
            limits, orientation = _process_limits(*symbols)
        else:
            # symbol not provided -- we can still try to compute a general form
            free = function.free_symbols
            if len(free) != 1:
                raise ValueError(
                    "specify dummy variables for %s" % function)
            limits, orientation = [Tuple(s) for s in free], 1
        # denest any nested calls
        while cls == type(function):
            limits = list(function.limits) + limits
            function = function.function
        # Only limits with lower and upper bounds are supported; the
        # indefinite form is not supported
        if any(len(l) != 3 or None in l for l in limits):
            raise ValueError('ExprWithLimits requires values for lower and upper bounds.')
        obj = Expr.__new__(cls, **assumptions)
        # _args layout: args[0] is the function, args[1:] are the limits.
        arglist = [function]
        arglist.extend(limits)
        obj._args = tuple(arglist)
        obj.is_commutative = function.is_commutative  # limits already checked
        return obj
    @property
    def function(self):
        """Return the function applied across limits.

        This is the first element of ``_args``; the remaining elements
        are the limit tuples.

        Examples
        ========

        >>> from sympy import Integral
        >>> from sympy.abc import x
        >>> Integral(x**2, (x,)).function
        x**2

        See Also
        ========

        limits, variables, free_symbols
        """
        return self._args[0]
    @property
    def limits(self):
        """Return the limits of the expression as a tuple of Tuples.

        Examples
        ========

        >>> from sympy import Integral
        >>> from sympy.abc import x, i
        >>> Integral(x**i, (i, 1, 3)).limits
        ((i, 1, 3),)

        See Also
        ========

        function, variables, free_symbols
        """
        return self._args[1:]
    @property
    def variables(self):
        """Return a list of the dummy variables (one per limit tuple).

        Examples
        ========

        >>> from sympy import Sum
        >>> from sympy.abc import x, i
        >>> Sum(x**i, (i, 1, 3)).variables
        [i]

        See Also
        ========

        function, limits, free_symbols
        as_dummy : Rename dummy variables
        transform : Perform mapping on the dummy variable
        """
        return [l[0] for l in self.limits]
    @property
    def free_symbols(self):
        """
        This method returns the symbols in the object, excluding those
        that take on a specific value (i.e. the dummy symbols).

        Examples
        ========

        >>> from sympy import Sum
        >>> from sympy.abc import x, y
        >>> Sum(x, (x, y, 1)).free_symbols
        set([y])
        """
        # don't test for any special values -- nominal free symbols
        # should be returned, e.g. don't return set() if the
        # function is zero -- treat it like an unevaluated expression.
        function, limits = self.function, self.limits
        isyms = function.free_symbols
        for xab in limits:
            if len(xab) == 1:
                # (x,) style limit: the variable itself stays free.
                isyms.add(xab[0])
                continue
            # take out the target symbol
            if xab[0] in isyms:
                isyms.remove(xab[0])
            # add in the new symbols
            for i in xab[1:]:
                isyms.update(i.free_symbols)
        return isyms
@property
| def is_number(self):
"""Return True if the Sum has no free symbols, else False."""
return not self.free_symbols
    def as_dummy(self):
        """
        Replace instances of the given dummy variables with explicit dummy
        counterparts to make clear what are dummy variables and what
        are real-world symbols in an object.

        Examples
        ========

        >>> from sympy import Integral
        >>> from sympy.abc import x, y
        >>> Integral(x, (x, x, y), (y, x, y)).as_dummy()
        Integral(_x, (_x, x, _y), (_y, x, y))

        If the object supports the "integral at" limit ``(x,)`` it
        is not treated as a dummy, but the explicit form, ``(x, x)``
        of length 2 does treat the variable as a dummy.

        >>> Integral(x, x).as_dummy()
        Integral(x, x)
        >>> Integral(x, (x, x)).as_dummy()
        Integral(_x, (_x, x))

        If there were no dummies in the original expression, then the
        symbols which cannot be changed by subs() are clearly seen as
        those with an underscore prefix.

        See Also
        ========

        variables : Lists the integration variables
        transform : Perform mapping on the integration variable
        """
        reps = {}
        f = self.function
        limits = list(self.limits)
        # Walk the limits from last to first so that bounds appearing in
        # earlier limits get rewritten in terms of the dummies already
        # introduced for later variables (see the first doctest above).
        for i in range(-1, -len(limits) - 1, -1):
            xab = list(limits[i])
            if len(xab) == 1:
                # "integral at" limit: variable is not treated as a dummy.
                continue
            x = xab[0]
            xab[0] = x.as_dummy()
            for j in range(1, len(xab)):
                xab[j] = xab[j].subs(reps)
            reps[x] = xab[0]
            limits[i] = xab
        f = f.subs(reps)
        return self.func(f, *limits)
    def _eval_interval(self, x, a, b):
        # Replace the limit tuple whose variable is x with (x, a, b),
        # leaving every other limit untouched, and rebuild the expression.
        limits = [( i if i[0] != x else (x,a,b) ) for i in self.limits]
        integrand = self.function
        return self.func(integrand, *limits)
def _eval_subs(self, old, new):
|
anselmobd/fo2 | src/lotes/migrations/0070_regracolecao_table_name.py | Python | mit | 608 | 0.001656 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-05-19 15:13
from __futu | re__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('lotes', '0069_leadcolecao_regracolecao'),
]
operations = [
| migrations.AlterModelOptions(
name='regracolecao',
options={'verbose_name': 'Regra por coleção', 'verbose_name_plural': 'Regras por coleção'},
),
migrations.AlterModelTable(
name='regracolecao',
table='fo2_lot_regra_colecao',
),
]
|
Raukonim/piva | qr.py | Python | gpl-3.0 | 6,541 | 0.099832 | # -*- coding: utf-8 -*-
"""
Created on Tue May 05 09:12:14 2015
@author: afajula
"""
from pylab import *
# Sample payload encoded by the demo code at the bottom of the file.
s="HELLO WORLD"
#%% variable declaration
# Characters valid in alphanumeric mode, in value order (value == index).
alphanumeric_table2 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:"
numeric_table ="0123456789"
# Explicit character -> value map for alphanumeric mode (same data as above).
alphanumeric_table={
'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,'9':9,
'A':10,'B':11,'C':12,'D':13,'E':14,'F':15,'G':16,'H':17,'I':18,
'J':19,'K':20,'L':21,'M':22,'N':23,'O':24,'P':25,'Q':26,'R':27,
'S':28,'T':29,'U':30,'V':31,'W':32,'X':33,'Y':34,'Z':35,' ':36,
'$':37,'%':38,'*':39,'+':40,'-':41,'.':42,'/':43,':':44}
'''
Character capacities table source: http://www.thonky.com/qr-code-tutorial/character-capacities/
Implemented modes: Numerical, Alphanumerical, Byte
x=[version,correction]
version: 1-40
correction: L,M,Q,H
'''
# Rows are versions 1-40, columns are correction levels L, M, Q, H.
#Numerical mode
num=array([
[41,34,27,17],[77,63,48,34],[127,101,77,58],[187,149,111,82],
[255,202,144,106],[322,255,178,139],[370,293,207,154],[461,365,259,202],
[552,432,312,235],[652,513,364,288],[772,604,427,331],[883,691,489,374],
[1022,796,580,427],[1101,871,621,468],[1250,991,703,530],[1408,1082,775,602],
[1548,1212,876,674],[1725,1346,948,746],[1903,1500,1063,813],[2061,1600,1159,919],
[2232,1708,1224,969],[2409,1872,1358,1056],[2620,2059,1468,1108],[2812,2188,1588,1228],
[3057,2395,1718,1286],[3283,2544,1804,1425],[3517,2701,1933,1501],[3669,2857,2085,1581],
[3909,3035,2181,1677],[4158,3289,2358,1782],[4417,3486,2473,1897],[4686,3693,2670,2022],
[4965,3909,2805,2157],[5253,4134,2949,2301],[5529,4343,3081,2361],[5836,4588,3244,2524],
[6153,4775,3417,2625],[6479,5039,3599,2735],[6743,5313,3791,2927],[7089,5596,3993,3057]
])
# Rows are versions 1-40, columns are correction levels L, M, Q, H.
#Alphanumerical mode
# FIX: version 13-L was 319; the published capacity table gives 619.
alpha=array([
[25,20,16,10],[47,38,29,20],[77,61,47,35],[114,90,67,50],
[154,122,87,64],[195,154,108,84],[224,178,125,93],[279,221,157,122],
[335,262,189,143],[395,311,221,174],[468,366,259,200],[535,419,296,227],
[619,483,352,259],[667,528,376,283],[758,600,426,321],[854,656,470,365],
[938,734,531,408],[1046,816,574,452],[1153,909,644,493],[1249,970,702,557],
[1352,1035,742,587],[1460,1134,823,640],[1588,1248,890,672],[1704,1326,963,744],
[1853,1451,1041,779],[1990,1542,1094,864],[2132,1637,1172,910],[2223,1732,1263,958],
[2369,1839,1322,1016],[2520,1994,1429,1080],[2677,2113,1499,1150],[2840,2238,1618,1226],
[3009,2369,1700,1307],[3183,2506,1787,1394],[3351,2632,1867,1431],[3537,2780,1966,1530],
[3729,2894,2071,1591],[3927,3054,2181,1658],[4087,3220,2298,1774],[4296,3391,2420,1852]
])
#Byte mode
# FIX: version 39-L was 289; the published capacity table gives 2809.
byte=array([
[17,14,11,7],[32,26,20,14],[53,42,32,24],[78,62,46,34],
[106,84,60,44],[134,106,74,58],[154,122,86,64],[192,152,108,84],
[230,180,130,98],[271,213,151,119],[321,251,177,137],[367,287,203,155],
[425,331,241,177],[458,362,258,194],[520,412,292,220],[586,450,322,250],
[644,504,364,280],[718,560,394,310],[792,624,442,338],[858,666,482,382],
[929,711,509,403],[1003,779,565,439],[1091,857,611,461],[1171,911,661,511],
[1273,997,715,533],[1367,1059,751,593],[1465,1125,805,625],[1528,1190,868,658],
[1628,1264,908,698],[1732,1370,982,742],[1840,1452,1030,790],[1952,1538,1112,842],
[2068,1638,1168,898],[2188,1722,1228,958],[2303,1809,1283,983],[2431,1911,1351,1051],
[2563,1989,1423,1093],[2699,2099,1499,1139],[2809,2213,1579,1219],[2953,2331,1663,1273]
])
# Character-count-indicator bit lengths for version groups 1-9, 10-26, 27-40.
num_indicator=[10,12,14]
alpha_indicator=[9,11,13]
byte_indicator=[8,16,16]
#%% qrcode class
class qrcode:
def __init__ (self, data, version=1, correction=0 ):
self.data = data.decode('utf-8') #Convertim tot a Unicode
self.comp_dades()
self.length = len(self.data)
self.define_mode()
self.check_len()
self.correction = correction
self.check_version()
#self.version = version
self.bin_len = bin(self.length)[2:]
self.qr_matrix = array([[0],[0]])
self.car_count=9
self.pad_len=self.car_count_calc()
#def caracter_count_indicator(self, version, encoding):
# self.car_count=9
"""
Numeric 7089 characters
Alphanumeric 4296 characters
Byte 2953 characters
Kanji 1817 characters
"""
def check_len(self):
a={'0001':7089,'0010':4296,'0100':2953}
if a[self.mode]<self.length:
print 'ERROR'
exit()
def comp_dades(self):
for c in self.data:
try:
c.encode('latin1')
except UnicodeEncodeError:
print "ERROR!"
def define_mode(self):
numeric_seq =[x in numeric_table for x in self.data]
alpha_seq = [x in alphanumeric_table2 for x in self.data]
if False in numeric_seq:
if False in alpha_seq:
self.mode = '0100' #Byte
self.capacities=byte
self.padding=byte_indicator
else:
self.mode = '0010' #Alpha
self.capacities=alpha
self.padding=alpha_indicator
else:
self.mode = '0001'#Numeric
self.capacities=num
self.padding=num_indicator
def check_version(self):
for i in range(39,-1, -1):
if self.capacities[i, self.correction]>=self.length :
self.version=i+1
else:
break
def car_count_calc(self):
if version<27:
if version<10:
self.car_count=self.padding[0]
else:
self.car_count=self.padding[1]
else:
self.car_count=self.padding[2]
self.padding_indicator=self.bin_len.zfill(self.car_count)
def define_correction(self, corr):
self.correction=corr
def matrix_init(self):
self.qr_matrix=zeros([21+(4*(self.version-1))], [21+(4*(self.version-1))])
def alphanumeric_encod | ing(self):
z=''
for i in range(0,self.length,2):
x=''
x=self.data[i:i+2]
print x
if len(x)==1:
y=bi | n(alphanumeric_table[x])[2:]
y=y.zfill(6)
else:
y=bin((45*alphanumeric_table[x[0]])+alphanumeric_table[x[1]])[2:]
y=y.zfill(11)
print y
z+=y
print z
#%% demo: encode the sample string defined at the top of the file
hello=qrcode(s)
hello.car_count_calc()
hello.alphanumeric_encoding()
|
NicoVarg99/daf-recipes | ckan/ckan/ckan/ckanext/reclineview/tests/test_view.py | Python | gpl-3.0 | 6,249 | 0 | # encoding: utf-8
import paste.fixture
from ckan.common import config
import ckan.model as model
import ckan.tests.legacy as tests
import ckan.plugins as p
import ckan.lib.helpers as h
import ckanext.reclineview.plugin as plugin
import ckan.lib.create_test_data as create_test_data
import ckan.config.middleware as middleware
from ckan.tests import helpers, factories
class BaseTestReclineViewBase(tests.WsgiAppCase):
    """Shared fixture for the recline view suites: loads the plugin named
    by ``cls.view_type`` into a fresh test app and creates one resource
    view for it."""

    @classmethod
    def setup_class(cls):
        # Remember the template setting so teardown_class can restore it.
        cls.config_templates = config['ckan.legacy_templates']
        config['ckan.legacy_templates'] = 'false'
        wsgiapp = middleware.make_app(config['global_conf'], **config)

        p.load(cls.view_type)

        cls.app = paste.fixture.TestApp(wsgiapp)
        cls.p = cls.view_class()

        create_test_data.CreateTestData.create()

        cls.resource_view, cls.package, cls.resource_id = \
            _create_test_view(cls.view_type)

    @classmethod
    def teardown_class(cls):
        config['ckan.legacy_templates'] = cls.config_templates
        p.unload(cls.view_type)
        model.repo.rebuild_db()

    def test_can_view(self):
        data_dict = {'resource': {'datastore_active': True}}
        assert self.p.can_view(data_dict)

        data_dict = {'resource': {'datastore_active': False}}
        assert not self.p.can_view(data_dict)

    def test_title_description_iframe_shown(self):
        url = h.url_for(controller='package', action='resource_read',
                        id=self.package.name, resource_id=self.resource_id)
        result = self.app.get(url)
        assert self.resource_view['title'] in result
        assert self.resource_view['description'] in result
        assert 'data-module="data-viewer"' in result.body
class TestReclineView(BaseTestReclineViewBase):
    view_type = 'recline_view'
    view_class = plugin.ReclineView

    def test_it_has_no_schema(self):
        """The generic recline view declares no configuration schema."""
        schema = self.p.info().get('schema')
        assert schema is None, schema

    def test_can_view_format_no_datastore(self):
        '''
        Test can_view with acceptable formats when datastore_active is False
        (DataProxy in use).
        '''
        for resource_format in ('CSV', 'XLS', 'TSV', 'csv', 'xls', 'tsv'):
            assert self.p.can_view(
                {'resource': {'datastore_active': False,
                              'format': resource_format}})

    def test_can_view_bad_format_no_datastore(self):
        '''
        Test can_view with incorrect formats when datastore_active is False.
        '''
        for resource_format in ('TXT', 'txt', 'doc', 'JSON'):
            assert not self.p.can_view(
                {'resource': {'datastore_active': False,
                              'format': resource_format}})
class TestReclineViewDatastoreOnly(helpers.FunctionalTestBase):
    # End-to-end check that a datastore-only resource (no uploaded file)
    # gets a default recline view rendered on its resource page.

    @classmethod
    def setup_class(cls):
        # Plugins may already be loaded by other suites; load idempotently.
        if not p.plugin_loaded('recline_view'):
            p.load('recline_view')
        if not p.plugin_loaded('datastore'):
            p.load('datastore')
        app_config = config.copy()
        app_config['ckan.legacy_templates'] = 'false'
        app_config['ckan.plugins'] = 'recline_view datastore'
        app_config['ckan.views.default_views'] = 'recline_view'
        wsgiapp = middleware.make_app(config['global_conf'], **app_config)
        cls.app = paste.fixture.TestApp(wsgiapp)

    @classmethod
    def teardown_class(cls):
        if p.plugin_loaded('recline_view'):
            p.unload('recline_view')
        if p.plugin_loaded('datastore'):
            p.unload('datastore')

    def test_create_datastore_only_view(self):
        dataset = factories.Dataset()
        data = {
            'resource': {'package_id': dataset['id']},
            'fields': [{'id': 'a'}, {'id': 'b'}],
            'records': [{'a': 1, 'b': 'xyz'}, {'a': 2, 'b': 'zzz'}]
        }
        result = helpers.call_action('datastore_create', **data)

        resource_id = result['resource_id']
        url = h.url_for(controller='package', action='resource_read',
                        id=dataset['id'], resource_id=resource_id)
        result = self.app.get(url)
        # The data-viewer module attribute marks the rendered recline view.
        assert 'data-module="data-viewer"' in result.body
class TestReclineGridView(BaseTestReclineViewBase):
    view_type = 'recline_grid_view'
    view_class = plugin.ReclineGridView

    def test_it_has_no_schema(self):
        """The grid view declares no configuration schema."""
        info_schema = self.p.info().get('schema')
        assert info_schema is None, info_schema
class TestReclineGraphView(BaseTestReclineViewBase):
    view_type = 'recline_graph_view'
    view_class = plugin.ReclineGraphView

    def test_it_has_the_correct_schema_keys(self):
        """The graph view schema covers pagination and chart options."""
        expected = ['offset', 'limit', 'graph_type', 'group', 'series']
        _assert_schema_exists_and_has_keys(self.p.info().get('schema'),
                                           expected)
class TestReclineMapView(BaseTestReclineViewBase):
    view_type = 'recline_map_view'
    view_class = plugin.ReclineMapView

    def test_it_has_the_correct_schema_keys(self):
        """The map view schema covers pagination and map configuration."""
        expected = ['offset', 'limit', 'map_field_type',
                    'latitude_field', 'longitude_field', 'geojson_field',
                    'auto_zoom', 'cluster_markers']
        _assert_schema_exists_and_has_keys(self.p.info().get('schema'),
                                           expected)
def _create_test_view(view_type):
    """Create a resource view of *view_type* on the second resource of the
    'annakarenina' test package, acting as the test sysadmin.

    Returns a (resource_view dict, package, resource_id) tuple.
    """
    context = {'model': model,
               'session': model.Session,
               'user': model.User.get('testsysadmin').name}

    package = model.Package.get('annakarenina')
    resource_id = package.resources[1].id
    resource_view = {'resource_id': resource_id,
                     'view_type': view_type,
                     'title': u'Test View',
                     'description': u'A nice test view'}
    resource_view = p.toolkit.get_action('resource_view_create')(
        context, resource_view)
    return resource_view, package, resource_id
def _assert_schema_exists_and_has_keys(schema, expected_keys):
    """Assert that *schema* is not None and that its keys are exactly
    *expected_keys* (order-insensitive).

    Fixes the Python-2-only ``dict.keys().sort()`` idiom (``dict.keys()``
    is an unsortable view on Python 3) and no longer mutates the caller's
    ``expected_keys`` list in place.
    """
    assert schema is not None, schema
    keys = sorted(schema.keys())
    expected = sorted(expected_keys)
    assert keys == expected, '%s != %s' % (keys, expected)
|
scrapinghub/exporters | tests/test_readers.py | Python | bsd-3-clause | 2,129 | 0.001409 | import unittest
from exporters.readers.base_reader import BaseReader
from exporters.readers.random_reader import RandomReader
from .utils import meta
class BaseReaderTest(unittest.TestCase):
    """Contract tests for the abstract BaseReader."""

    def setUp(self):
        self.reader = BaseReader({}, meta())

    def test_get_next_batch_not_implemented(self):
        # Batch retrieval is left to concrete subclasses.
        self.assertRaises(NotImplementedError, self.reader.get_next_batch)

    def test_set_last_position(self):
        position = {'position': 5}
        self.reader.set_last_position(position)
        self.assertEqual(self.reader.last_position, {'position': 5})
class RandomReaderTest(unittest.TestCase):
    """Exercises RandomReader batching and bookkeeping.

    (Repaired extraction artifacts that split ``test_get_next_batch`` and
    ``self.reader`` with stray ``|`` characters.)
    """

    def setUp(self):
        self.options = {
            'exporter_options': {
                'log_level': 'DEBUG',
                'logger_name': 'export-pipeline'
            },
            'reader': {
                'name': 'exporters.readers.random_reader.RandomReader',
                'options': {
                    'number_of_items': 1000,
                    'batch_size': 100
                }
            },
        }

        self.reader = RandomReader(self.options, meta())
        self.reader.set_last_position(None)

    def test_get_next_batch(self):
        batch = list(self.reader.get_next_batch())
        self.assertEqual(len(batch), self.options['reader']['options']['batch_size'])

    def test_get_second_batch(self):
        self.reader.get_next_batch()
        batch = list(self.reader.get_next_batch())
        self.assertEqual(len(batch), self.options['reader']['options']['batch_size'])
        self.assertEqual(self.reader.get_metadata('read_items'),
                         self.options['reader']['options']['batch_size'])

    def test_get_all(self):
        # Drain the reader completely and check the item total.
        total_items = 0
        while not self.reader.finished:
            batch = list(self.reader.get_next_batch())
            total_items += len(batch)
        self.assertEqual(total_items, self.options['reader']['options']['number_of_items'])

    def test_set_last_position_none(self):
        self.reader.set_last_position({'last_read': 123})
        self.assertEqual({'last_read': 123}, self.reader.last_position)
|
daicang/Leetcode-solutions | 137-single-number-ii.py | Python | mit | 413 | 0.024213 | from typing import List
class Solution:
    def singleNumber(self, nums: List[int]) -> int:
        """Return the value that appears exactly once in *nums*, where
        every other value appears exactly three times.

        Classic two-accumulator bit trick: ``ones`` holds bits seen
        1 (mod 3) times, ``twos`` bits seen 2 (mod 3) times; a third
        sighting clears the bit from both.  O(n) time, O(1) space.

        (Repaired the ``def`` line corrupted by a stray ``|`` artifact.)
        """
        ones = 0
        twos = 0
        for value in nums:
            ones = (ones ^ value) & ~twos
            twos = (twos ^ value) & ~ones
        return ones
# Ad-hoc smoke test: each entry is a nums list where every value appears
# three times except one.
data = [
    [1,1,1,3,3,3,5]
]

s = Solution()
for d in data:
    print(s.singleNumber(d))
|
rit-sailing/website | events/migrations/0002_auto_20160123_1320.py | Python | mit | 749 | 0.001335 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-23 18:20
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Rename Event.start_date to date and add the optional organizer link.

    (Repaired the ``dependencies`` identifier corrupted by a stray ``|``
    extraction artifact.)
    """

    dependencies = [
        ('main', '0001_initial'),
        ('events', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='event',
            old_name='start_date',
            new_name='date',
        ),
        migrations.AddField(
            model_name='event',
            name='organizer',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='organizer', to='main.TeamMember'),
        ),
    ]
|
ScreamingUdder/mantid | qt/python/mantidqt/widgets/manageuserdirectories.py | Python | gpl-3.0 | 933 | 0.001072 | # This file is part of the mantidqt package
#
# Copyright (C) 2017 mantidproject
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, unicode_literals)
from mantidqt.utils.qt import import_qtlib
# Re-export the compiled ManageUserDirectories widget from the C++-backed
# _widgetscore library so it can be imported from this Python module.
ManageUserDirectories = import_qtlib('_widgetscore', 'mantidqt.widgets', 'ManageUserDirectories')
|
peterayeni/django-kong-admin | kong_admin/logic.py | Python | bsd-3-clause | 1,977 | 0.003035 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from .factory import get_api_sync_engine, get_consumer_sync_engine
from .models import APIReference, ConsumerReference, PluginConfigurationReference
def synchronize_apis(client, queryset=None):
    """Push all (or the given queryset of) API references to Kong via the
    API sync engine, deleting remote entries that no longer exist locally."""
    return get_api_sync_engine().synchronize(client, queryset=queryset, delete=True)
def synchronize_api(client, obj, toggle=False):
    """Publish or withdraw a single API reference.

    When *toggle* is True the current enabled state is inverted instead of
    re-applied, and the new state is persisted without triggering another
    model save.  Returns the (possibly replaced) object.

    (Repaired ``obj.id``, which was corrupted by a stray ``|`` artifact.)
    """
    if (toggle and obj.enabled) or (not toggle and not obj.enabled):
        obj = get_api_sync_engine().withdraw(client, obj)
        obj.enabled = False
    else:
        obj = get_api_sync_engine().publish(client, obj)
        obj.enabled = True

    if toggle:
        # Update enabled state without triggering another save
        APIReference.objects.filter(id=obj.id).update(enabled=obj.enabled)

    return obj
def synchronize_plugin_configurations(client, queryset=None):
    """Push all (or the given queryset of) plugin configuration references
    to Kong, deleting remote entries that no longer exist locally."""
    return get_api_sync_engine().plugins().synchronize(client, queryset=queryset, delete=True)
def synchronize_plugin_configuration(client, obj, toggle=False):
    """Publish a single plugin configuration reference, optionally
    inverting its enabled flag first.  Returns the published object."""
    obj.enabled = not obj.enabled if toggle else obj.enabled
    obj = get_api_sync_engine().plugins().publish(client, obj)
    # Update enabled state without triggering another save
    PluginConfigurationReference.objects.filter(id=obj.id).update(enabled=obj.enabled)
    return obj
def synchronize_consumers(client, queryset=None):
    """Push all (or the given queryset of) consumer references to Kong,
    deleting remote entries that no longer exist locally."""
    return get_consumer_sync_engine().synchronize(client, queryset=queryset, delete=True)
def synchronize_consumer(client, obj, toggle=False):
    """Publish or withdraw a single consumer reference; the consumer
    counterpart of ``synchronize_api`` with identical toggle semantics."""
    if (toggle and obj.enabled) or (not toggle and not obj.enabled):
        obj = get_consumer_sync_engine().withdraw(client, obj)
        obj.enabled = False
    else:
        obj = get_consumer_sync_engine().publish(client, obj)
        obj.enabled = True
    if toggle:
        # Update enabled state without triggering another save
        ConsumerReference.objects.filter(id=obj.id).update(enabled=obj.enabled)
    return obj
|
dom96/MDSBot | modules/base.py | Python | gpl-2.0 | 539 | 0.009276 | #!/usr/bin/env python
'''
Example module
Take a look | at the test module to check how events are handled.
'''
# Short description reported for this module.
about = "About message goes here."
def main(server, initOnChannel, usrManager):
    """Module entry point, called when the module is initialized.

    `initOnChannel` is the channel this module was loaded from, or nothing
    if it was loaded at startup.
    """
    # A comment alone is not a function body; `pass` is required for the
    # module to even import.
    pass
def destroy(server):
    """Called when the module gets unloaded; no cleanup is needed in this
    example module."""
    pass
def cmd(server, word, word_eol, usrManager):
    """Called when a command is received (|cmd or |whatever)."""
    pass
|
Ghost-script/Online-FIR-General-Diary-lodging-System | Web_App/Web_App/models.py | Python | mit | 2,567 | 0.005454 | from django.db import models
from django.contrib.auth.models import User
from police.models import Stationdata
class general_diary(models.Model):
    # A general-diary entry lodged by a citizen: complainant details, two
    # identity proofs, the incident description and an OTP-verified flag.
    # (Repaired `idType_1_value` indentation and `models.BooleanField`,
    # both corrupted by stray `|` extraction artifacts.)
    ref_id = models.CharField(max_length=40,unique=True,default="00000")
    firstname = models.CharField(max_length=20)
    lastname = models.CharField(max_length=20)
    mobile = models.CharField(max_length=10)
    email = models.CharField(max_length=80)
    address = models.TextField()
    DOB = models.DateField('date of birth')
    idType_1 = models.CharField(max_length=10)
    idType_1_value = models.CharField(max_length=15)
    idType_2 = models.CharField(max_length=20)
    idType_2_value = models.CharField(max_length=15)
    StationCode = models.ForeignKey(Stationdata)
    Subject = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    detail = models.TextField()
    Time = models.DateTimeField('Occurence')
    Place = models.CharField(max_length=200)
    Loss = models.CharField(max_length=200)
    OTP = models.BooleanField(default=False)

    def __str__(self):              # __unicode__ on Python 2
        return self.Subject
class Fir(models.Model):
    # A First Information Report: same complainant fields as general_diary
    # plus suspect and witness details.
    ref_id = models.CharField(max_length=40,unique=True,default="00000")
    firstname = models.CharField(max_length=20)
    lastname = models.CharField(max_length=20)
    mobile = models.CharField(max_length=10)
    email = models.CharField(max_length=80)
    address = models.TextField()
    DOB = models.DateField('date of birth')
    idType_1 = models.CharField(max_length=10)
    idType_1_value = models.CharField(max_length=15)
    idType_2 = models.CharField(max_length=20)
    idType_2_value = models.CharField(max_length=15)
    StationCode = models.ForeignKey(Stationdata)
    Subject = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
    detail = models.TextField()
    Suspect = models.CharField(max_length=500)
    Time = models.DateTimeField('Occurence')
    Place = models.CharField(max_length=200)
    Witness = models.CharField(max_length=500)
    Loss = models.CharField(max_length=200)
    OTP = models.BooleanField(default=False)

    def __str__(self):              # __unicode__ on Python 2
        return self.Subject
class lookup_table(models.Model):
    # Maps a public reference id and its hash to the record kind.
    ref_id = models.CharField(max_length=40,unique=True,default="00000")
    hashmap = models.CharField(max_length=70,unique=True,default="00000")
    # NOTE(review): presumably "GD" vs "FIR" -- confirm against callers.
    # `type` shadows the builtin, but renaming would change the DB column.
    type = models.CharField(max_length=5,default="GD")

    def __str__(self):              # __unicode__ on Python 2
        return self.hashmap
qtproject/pyside-shiboken | tests/otherbinding/extended_multiply_operator_test.py | Python | gpl-2.0 | 2,230 | 0.010762 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
'''Test cases for libsample's Point multiply operator defined in libother module.'''
import unittest
from sample import Point
from other import Number
class PointOperationsWithNumber(unittest.TestCase):
    '''Test cases for libsample's Point multiply operator defined in libother module.

    (Repaired two ``Point(...)`` calls corrupted by stray ``|`` artifacts.)
    '''

    def testPointTimesInt(self):
        '''sample.Point * int'''
        pt1 = Point(2, 7)
        num = 3
        pt2 = Point(pt1.x() * num, pt1.y() * num)
        self.assertEqual(pt1 * num, pt2)

    def testIntTimesPoint(self):
        '''int * sample.Point'''
        pt1 = Point(2, 7)
        num = 3
        pt2 = Point(pt1.x() * num, pt1.y() * num)
        self.assertEqual(num * pt1, pt2)

    def testPointTimesNumber(self):
        '''sample.Point * other.Number'''
        pt = Point(2, 7)
        num = Number(11)
        self.assertEqual(pt * num, pt * 11)
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
|
vianney-g/python-exercices | eulerproject/pb0027.py | Python | gpl-2.0 | 793 | 0.006305 | #!/usr/bin/env python
# *-* coding:utf-8 *-*
"""
Date :
Author : Vianney Gremmel loutre.a@gmail.com
"""
def memo(f):
    """Memoize the single-argument function *f*.

    Returns the bound ``__getitem__`` of a dict subclass whose
    ``__missing__`` computes and caches ``f(key)`` on first access.
    """
    class _Cache(dict):
        def __missing__(self, key):
            value = self[key] = f(key)
            return value
    return _Cache().__getitem__
@memo
def isprime(n):
    """Memoized trial-division primality test.

    BUG FIX: the original reported 0 and 1 as prime (the trial-division
    loop is empty for n < 4), which corrupts the consecutive-prime counts
    in maxi_primes() whenever |n*n + a*n + b| evaluates to 0 or 1.
    """
    if n < 2:
        return False
    for d in xrange(2, int(n**0.5) + 1):
        if n % d == 0:
            return False
    return True
def maxi_primes():
    """For every coefficient pair (a, b) in the Euler #27 search space,
    yield (n, a, b) where n is the first positive integer at which
    |n*n + a*n + b| stops being prime.

    NOTE(review): because of the ``and n`` guard, the n == 0 value (b
    itself) is never tested for primality -- confirm this is intended.
    """
    for a in xrange(-1000, 1001):
        for b in xrange(-999, 1001, 2):
            n = 0
            while True:
                if not isprime(abs(n*n + a*n + b)) and n:
                    yield (n, a, b)
                    break
                n += 1
print 'please wait...'
# max() over (n, a, b) tuples picks the pair whose prime run is longest
# (largest first failing n; ties broken by a, then b).
max_score = max(score for score in maxi_primes())
print max_score
# Euler #27 asks for the product of the coefficients a*b.
print max_score[1]*max_score[2]
pastpages/savepagenow | savepagenow/api.py | Python | mit | 4,446 | 0.002474 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
import requests
from .exceptions import (
CachedPage,
WaybackRuntimeError,
BlockedByRobots
)
from urllib.parse import urljoin
from requests.utils import parse_header_links
def capture(
    target_url,
    user_agent="savepagenow (https://github.com/pastpages/savepagenow)",
    accept_cache=False
):
    """
    Archives the provided URL using archive.org's Wayback Machine.

    Returns the archive.org URL where the capture is stored.

    Raises a CachedPage exception if archive.org declines to conduct a new
    capture and returns a previous snapshot instead.

    To silence that exception, pass True to the ``accept_cache`` keyword
    argument.

    Raises WaybackRuntimeError (or its BlockedByRobots subclass) when
    archive.org reports a runtime error, an error status code, or an
    unparseable response.
    """
    # Put together the URL that will save our request
    domain = "https://web.archive.org"
    save_url = urljoin(domain, "/save/")
    request_url = save_url + target_url

    # Send the capture request to archive.org
    headers = {
        'User-Agent': user_agent,
    }
    response = requests.get(request_url, headers=headers)

    # If it has an error header, raise that.
    has_error_header = 'X-Archive-Wayback-Runtime-Error' in response.headers
    if has_error_header:
        error_header = response.headers['X-Archive-Wayback-Runtime-Error']
        if error_header == 'RobotAccessControlException: Blocked By Robots':
            raise BlockedByRobots("archive.org returned blocked by robots.txt error")
        else:
            raise WaybackRuntimeError(error_header)

    # If it has an error code, raise that
    if response.status_code in [403, 502, 520]:
        raise WaybackRuntimeError(response.headers)

    # If there's a content-location header in the response, we will use that.
    try:
        content_location = response.headers['Content-Location']
        archive_url = domain + content_location
    except KeyError:
        # If there's not, we will try to parse out a Link header, which is another style they use.
        try:
            # Parse the Link tag in the header, which points to memento URLs in Wayback
            header_links = parse_header_links(response.headers['Link'])
            archive_obj = [h for h in header_links if h['rel'] == 'memento'][0]
            archive_url = archive_obj['url']
        except Exception:
            # If neither of those things works throw this error.
            raise WaybackRuntimeError(dict(status_code=response.status_code, headers=response.headers))

    # Determine if the response was cached
    cached = 'X-Page-Cache' in response.headers and response.headers['X-Page-Cache'] == 'HIT'

    # If it was cached ...
    if cached:
        # .. and we're not allowing that
        if not accept_cache:
            # ... throw an error
            msg = "archive.org returned a cache of this page: {}".format(archive_url)
            raise CachedPage(msg)

    # Finally, return the archived URL
    return archive_url
def capture_or_cache(
    target_url,
    user_agent="savepagenow (https://github.com/pastpages/savepagenow)"
):
    """
    Archives the provided URL using archive.org's Wayback Machine, unless
    the page has been recently captured.

    Returns a tuple of (archive.org URL, bool).  The bool is True when a
    fresh capture was made and False when archive.org returned a recently
    cached capture instead, likely taken in the previous minutes.

    (Repaired the default ``user_agent`` string, which was split by a
    stray ``|`` extraction artifact.)
    """
    try:
        fresh_url = capture(target_url, user_agent=user_agent, accept_cache=False)
    except CachedPage:
        # A recent snapshot exists; accept it and flag it as cached.
        cached_url = capture(target_url, user_agent=user_agent, accept_cache=True)
        return cached_url, False
    return fresh_url, True
@click.command()
@click.argument("url")
@click.option("-ua", "--user-agent", help="User-Agent header for the web request")
@click.option("-c", "--accept-cache", help="Accept and return cached URL", is_flag=True)
def cli(url, user_agent, accept_cache):
    """
    Archives the provided URL using archive.org's Wayback Machine.

    Raises a CachedPage exception if archive.org declines to conduct a new
    capture and returns a previous snapshot instead.
    """
    # Only forward options the user actually supplied so capture()'s own
    # defaults apply otherwise.
    kwargs = {}
    if user_agent:
        kwargs["user_agent"] = user_agent
    if accept_cache:
        kwargs["accept_cache"] = accept_cache
    click.echo(capture(url, **kwargs))
# Allow invoking the CLI directly with `python api.py`.
if __name__ == "__main__":
    cli()
|
chfoo/wpull | wpull/urlrewrite_test.py | Python | gpl-3.0 | 6,269 | 0.002393 | import unittest
from wpull.url import URLInfo
from wpull.urlrewrite import URLRewriter, strip_path_session_id, \
strip_query_session_id
class TestURLRewrite(unittest.TestCase):
    def test_rewriter(self):
        # Strip both hash fragments and session ids.
        rewriter = URLRewriter(hash_fragment=True, session_id=True)

        # Clean URLs pass through unchanged.
        self.assertEquals(
            'http://example.com/',
            rewriter.rewrite(URLInfo.parse('http://example.com/')).url
        )
        # Plain fragments are dropped entirely.
        self.assertEquals(
            'http://example.com/',
            rewriter.rewrite(URLInfo.parse('http://example.com/#hashtag!')).url
        )
        # "#!" AJAX fragments become _escaped_fragment_ query arguments.
        self.assertEquals(
            'https://groups.google.com/forum/?_escaped_fragment_=forum/python-tulip',
            rewriter.rewrite(URLInfo.parse('https://groups.google.com/forum/#!forum/python-tulip')).url
        )
        self.assertEquals(
            'https://groups.google.com/forum/?stupid_hash_fragments&_escaped_fragment_=forum/python-tulip',
            rewriter.rewrite(URLInfo.parse(
                'https://groups.google.com/forum/?stupid_hash_fragments#!forum/python-tulip'
            )).url
        )
        self.assertEquals(
            'https://groups.google.com/forum/?stupid_hash_fragments=farts&_escaped_fragment_=forum/python-tulip',
            rewriter.rewrite(URLInfo.parse(
                'https://groups.google.com/forum/?stupid_hash_fragments=farts#!forum/python-tulip'
            )).url
        )
        # 32-char session ids are stripped from the query string.
        self.assertEquals(
            'http://example.com/',
            rewriter.rewrite(URLInfo.parse(
                'http://example.com/?sid=0123456789abcdefghijklemopqrstuv'
            )).url
        )
        self.assertEquals(
            'http://example.com/?horse=dog&',
            rewriter.rewrite(URLInfo.parse(
                'http://example.com/?horse=dog&sid=0123456789abcdefghijklemopqrstuv'
            )).url
        )
def test_strip_session_id_from_url_path(self):
self.assertEqual(
'/asdf',
strip_path_session_id("/asdf"),
)
self.assertEqual(
'/asdf/asdf.aspx',
strip_path_session_id("/asdf/asdf.aspx"),
)
self.assertEqual(
strip_path_session_id("/(S(4hqa0555fwsecu455xqckv45))/mileg.aspx"),
'/mileg.aspx',
'Check ASP_SESSIONID2'
)
self.assertEqual(
strip_path_session_id("/(4hqa0555fwsecu455xqckv45)/mileg.aspx"),
'/mileg.aspx',
'Check ASP_SESSIONID2 (again)'
)
self.assertEqual(
strip_path_session_id("/(a(4hqa0555fwsecu455xqckv45)S(4hqa0555fwsecu455xqckv45)f(4hqa0555fwsecu455xqckv45))/mileg.aspx?page=sessionschedules"),
'/mileg.aspx?page=sessionschedules',
'Check ASP_SESSIONID3'
)
self.assertEqual(
strip_path_session_id("/photos/36050182@N05/"),
'/photos/36050182@N05/',
"'@' in path"
)
def test_strip_session_id_from_url_query(self):
str32id = "0123456789abcdefghijklemopqrstuv"
url = "jsessionid=" + str32id
self.assertEqual(
strip_query_session_id(url),
''
)
url = "jsessionid=" + str32id + '0'
self.assertEqual(
strip_query_session_id(url),
'jsessionid=0123456789abcdefghijklemopqrstuv0',
"Test that we don't strip if not 32 chars only."
)
url = "jsessionid=" + str32id + "&x=y"
self.assertEqual(
strip_query_session_id(url),
'x=y',
"Test what happens when followed by another key/value pair."
)
url = "one=two&jsessionid=" + str32id + "&x=y"
self.assertEqual(
strip_query_session_id(url),
'one=two&x=y',
"Test what happens when followed by another key/value pair and"
"prefixed by a key/value pair."
)
url = "one=two&jsessionid=" + str32id
self.assertEqual(
strip_query_session_id(url),
'one=two&',
"Test what happens when prefixed by a key/value pair."
)
url = "aspsessionidABCDEFGH=" + "ABCDEFGHIJKLMNOPQRSTUVWX" + "&x=y"
self.assertEqual(
strip_query_session_id(url),
'x=y',
"Test aspsession."
)
url = "phpsessid=" + str32id + "&x=y"
self.assertEqual(
strip_query_session_id(url),
'x=y',
"Test archive phpsession."
)
url = "one=two&phpsessid=" + str32id + "&x=y"
| self.assertEqual(
| strip_query_session_id(url),
'one=two&x=y',
"With prefix too."
)
url = "one=two&phpsessid=" + str32id
self.assertEqual(
strip_query_session_id(url),
'one=two&',
"With only prefix"
)
url = "sid=9682993c8daa2c5497996114facdc805" + "&x=y";
self.assertEqual(
strip_query_session_id(url),
'x=y',
"Test sid."
)
url = "sid=9682993c8daa2c5497996114facdc805" + "&" + "jsessionid=" + str32id
self.assertEqual(
strip_query_session_id(url),
'',
"Igor test."
)
url = "CFID=1169580&CFTOKEN=48630702&dtstamp=22%2F08%2F2006%7C06%3A58%3A11"
self.assertEqual(
strip_query_session_id(url),
'dtstamp=22%2F08%2F2006%7C06%3A58%3A11'
)
url = "CFID=12412453&CFTOKEN=15501799&dt=19_08_2006_22_39_28"
self.assertEqual(
strip_query_session_id(url),
'dt=19_08_2006_22_39_28'
)
url = "CFID=14475712&CFTOKEN=2D89F5AF-3048-2957-DA4EE4B6B13661AB&r=468710288378&m=forgotten"
self.assertEqual(
strip_query_session_id(url),
'r=468710288378&m=forgotten'
)
url = "CFID=16603925&CFTOKEN=2AE13EEE-3048-85B0-56CEDAAB0ACA44B8"
self.assertEqual(
strip_query_session_id(url),
''
)
url = "CFID=4308017&CFTOKEN=63914124&requestID=200608200458360%2E39414378"
self.assertEqual(
strip_query_session_id(url),
'requestID=200608200458360%2E39414378'
)
|
CLLKazan/iCQA | qa-engine/forum/settings/users.py | Python | gpl-3.0 | 4,093 | 0.02321 | from forms import CommaStringListWidget
from django.forms import CheckboxSelectMultiple
from django.forms.widgets import RadioSelect
from base import Setting, SettingSet
from django.utils.translation import ugettext as _
USERS_SET = SettingSet('users', _('Users settings'), _("General settings for the iCQA users."), 20)
EDITABLE_SCREEN_NAME = Setting('EDITABLE_SCREEN_NAME', False, USERS_SET, dict(
label = _("Editable screen name"),
help_text = _("Allow users to alter their screen name."),
required=False))
MIN_USERNAME_LENGTH = Setting('MIN_USERNAME_LENGTH', 3, USERS_SET, dict(
label = _("Minimum username length"),
help_text = _("The minimum length (in character) of a username.")))
RESERVED_USERNAMES = Setting('RESERVED_USERNAMES',
[_('fuck'), _('shit'), _('ass'), _('sex'), _('add'), _('edit'), _('save'), _('delete'), _('manage'), _('update'), _('remove'), _('new')]
, USERS_SET, dict(
label = _("Disabled usernames"),
help_text = _("A comma separated list of disabled usernames (usernames not allowed during a new user registration)."),
widget=CommaStringListWidget))
SHOW_STATUS_DIAMONDS = Setting('SHOW_STATUS_DIAMONDS', True, USERS_SET, dict(
label=_("Show status diamonds"),
help_text = _("Show status \"diamonds\" next to moderators or superusers usernames."),
required=False,
))
EMAIL_UNIQUE = Setting('EMAIL_UNIQUE', True, USERS_SET, dict(
label = _("Force unique email"),
help_text = _("Should each user have an unique email."),
required=False))
REQUIRE_EMAIL_VALIDATION_TO = Setting('REQUIRE_EMAIL_VALIDATION_TO', [], USERS_SET, dict(
label = _("Require email validation to..."),
help_text = _("Which actions in this site, users without a valid email will be prevented from doing."),
widget=CheckboxSelectMultiple,
choices=(("ask", _("ask questions")), ("answer", _("provide answers")), ("comment", _("make comments")), ("flag", _("report posts"))),
required=False,
))
DONT_NOTIFY_UNVALIDATED = Setting('DONT_NOTIFY_UNVALIDATED', True, USERS_SET, dict(
label = _("Don't notify to invalid emails"),
help_text = _("Do not notify users with unvalidated emails."),
required=False))
HOLD_PENDING_POSTS_MINUTES = Setting('HOLD_PENDING_POSTS_MINUTES', 120, USERS_SET, dict(
label=_("Hold pending posts for X minutes"),
help_text=_("How much time in minutes a post should be kept in session until the user logs in or validates the email.")
))
# Age (minutes) after which a pending post prompts the user for confirmation
# instead of being published automatically on login/validation.
WARN_PENDING_POSTS_MINUTES = Setting('WARN_PENDING_POSTS_MINUTES', 15, USERS_SET, dict(
label=_("Warn about pending posts after X minutes"),  # fixed typo: "afer" -> "after"
help_text=_("How much time in minutes a user that just logged in or validated his email should be warned about a pending post instead of publishing it automatically.")
))
GRAVATAR_RATING_CHOICES = (
('g', _('suitable for display on all websites with any audience | type.')),
('pg', _('may contain rude gestures, provocatively dressed individuals, the lesser swear words, or mild violence.')),
('r', _('may contain such things as harsh profanity, intense violen | ce, nudity, or hard drug use.')),
('x', _('may contain hardcore sexual imagery or extremely disturbing violence.')),
)
GRAVATAR_ALLOWED_RATING = Setting('GRAVATAR_ALLOWED_RATING', 'g', USERS_SET, dict(
label = _("Gravatar rating"),
help_text = _("Gravatar allows users to self-rate their images so that they can indicate if an image is appropriate for a certain audience."),
widget=RadioSelect,
choices=GRAVATAR_RATING_CHOICES,
required=False))
GRAVATAR_DEFAULT_CHOICES = (
('mm', _('(mystery-man) a simple, cartoon-style silhouetted outline of a person (does not vary by email hash)')),
('identicon', _('a geometric pattern based on an email hash')),
('monsterid', _('a generated "monster" with different colors, faces, etc')),
('wavatar', _('generated faces with differing features and backgrounds')),
)
GRAVATAR_DEFAULT_IMAGE = Setting('GRAVATAR_DEFAULT_IMAGE', 'identicon', USERS_SET, dict(
label = _("Gravatar default"),
help_text = _("Gravatar has a number of built in options which you can also use as defaults."),
widget=RadioSelect,
choices=GRAVATAR_DEFAULT_CHOICES,
required=False))
|
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.4/django/db/models/signals.py | Python | bsd-3-clause | 658 | 0.00304 | from django.dispatch import Signal
# Model lifecycle signals (Django 1.4). Each Signal declares the keyword
# arguments its senders provide to receivers.
class_prepared = Signal(providing_args=["class"])
# Fired around Model.__init__.
pre_init = Signal(providing_args=["instance", "args", "kwargs"])
post_init = Signal(providing_args=["instance"])
# Fired around Model.save(); "raw" is True for fixture loading,
# "using" names the database alias.
pre_save = Signal(providing_args=["instance", "raw", "using"])
post_save = Signal(providing_args=["instance", "raw", "created", "using"])
# Fired around Model.delete().
pre_delete = Signal(providing_args=["instance", "using"])
post_delete = Signal(providing_args=["instance", "using"])
# Fired after syncdb creates tables for an app.
post_syncdb = Signal(providing_args=["class", "app", "created_models", "verbosity", "interactive"])
# Fired when a ManyToManyField's through table changes.
m2m_changed = Signal(providing_args=["action", "instance", "reverse", "model", "pk_set", "using"])
|
SteveRyherd/pyRenamer | src/pyrenamer_menu_cb.py | Python | gpl-2.0 | 2,231 | 0.000897 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2006-2008 Adolfo González Blázquez <code@infinicode.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
If you find any bugs or have any suggestions email: code@infinicode.org
"""
class PyrenamerMenuCB:
    """Menu-item callbacks for the pyRenamer main window.

    Every handler simply delegates to the main window object
    (``self.main``); no state is kept here beyond that reference.
    The ``widget`` argument is the GTK menu item that emitted the signal.
    """
    def __init__(self, main):
        # main: the application's main window/controller object.
        self.main = main
    def on_menu_undo_activate(self, widget):
        # Undo the last rename, refresh the listing, and flip the
        # undo/redo menu sensitivities.
        self.main.undo_manager.undo()
        self.main.dir_reload_current()
        self.main.menu_undo.set_sensitive(False)
        self.main.menu_redo.set_sensitive(True)
    def on_menu_redo_activate(self, widget):
        self.main.undo_manager.redo()
        self.main.dir_reload_current()
        self.main.menu_undo.set_sensitive(True)
        self.main.menu_redo.set_sensitive(False)
    def on_menu_refresh_activate(self, widget):
        # Rebuild the file browser and point it back at the active dir.
        self.main.file_browser.create_new()
        self.main.file_browser.set_active_dir(self.main.active_dir)
    # The handlers below switch the notebook to the matching tool tab.
    def on_menu_patterns_activate(self, widget):
        self.main.notebook.set_current_page(0)
    def on_menu_substitutions_activate(self, widget):
        self.main.notebook.set_current_page(1)
    def on_menu_insert_activate(self, widget):
        self.main.notebook.set_current_page(2)
    def on_menu_manual_activate(self, widget):
        self.main.notebook.set_current_page(3)
    def on_menu_images_activate(self, widget):
        self.main.notebook.set_current_page(4)
    def on_menu_music_activate(self, widget):
        self.main.notebook.set_current_page(5)
    def on_menu_show_options_activate(self, widget):
        # Checkbox item: show/hide the options panel to match its state.
        self.main.options_panel_state(widget.get_active())
belokop/indico_bare | indico/modules/admin/__init__.py | Python | gpl-3.0 | 2,175 | 0.004138 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.core import signals
from indico.util.i18n import _
from indico.web.flask.util import url_for
from indico.web.menu import SideMenuSection, SideMenuItem
@signals.menu.sections.connect_via('admin-sidemenu')
def _sidemenu_sections(sender, **kwargs):
    """Yield the section headers of the admin side menu.

    Connected to the ``menu.sections`` signal for 'admin-sidemenu'; the
    numeric argument is the ordering weight (higher appears first).
    """
    yield SideMenuSection('security', _("Security"), 90, icon='shield')
    yield SideMenuSection('user_management', _("User Management"), 60, icon='users')
    yield SideMenuSection('customization', _("Customization"), 50, icon='wrench')
    yield SideMenuSection('integration', _("Integration"), 40, icon='earth')
@signals.menu.items.connect_via('admin-sidemenu')
def _sidemenu_items(sender, **kwargs):
    """Yield the entries of the admin side menu.

    Items with a ``section=`` keyword are nested under the matching
    section from ``_sidemenu_sections``; the numeric argument (where
    given) is the ordering weight.
    """
    yield SideMenuItem('general', _('General Settings'), url_for('admin.adminList'), 100, icon='settings')
    yield SideMenuItem('storage', _('Disk Storage'), url_for('admin.adminSystem'), 70, icon='stack')
    yield SideMenuItem('ip_domains', _('IP Domains'), url_for('admin.domainList'), section='security')
    yield SideMenuItem('ip_acl', _('IP-based ACL'), url_for('admin.adminServices-ipbasedacl'), section='security')
    yield SideMenuItem('layout', _('Layout'), url_for('admin.adminLayout'), section='customization')
    yield SideMenuItem('homepage', _('Homepage'), url_for('admin.updateNews'), section='customization')
    yield SideMenuItem('protection', _('Protection'), url_for('admin.adminProtection'), section='security')
|
diarmuidcwc/GTSVideoDecom | VideoGTSDecom.py | Python | gpl-2.0 | 3,209 | 0.009037 | # -------------------------------------------------------------------------------
# Name:
# Purpose:
#
# Copyright 2014 Diarmuid Collins dcollins@curtisswright.com
# https://github.com/diarmuid
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import GtsDec
import MpegTS
import VidOverPCM
import datetime
import logging
from profilehooks import profile
class VideoGTSDecom(GtsDec.GtsDec):
    '''Create a new class inheriting the GtsDec class. Override the callback method using this approach'''
    # Base UDP port; each video stream gets BASE_UDP_PORT + index.
    BASE_UDP_PORT = 7777
    def __init__(self):
        super(VideoGTSDecom, self).__init__()
        self.vidOverPCM = VidOverPCM.VidOverPCM()
        self.mpegTS = dict()          # vid name -> MpegTS transmitter
        self.logtofile = True         # also dump each stream to a .ts file
        self.dstip = "235.0.0.1"      # multicast destination address
        self.dstport = 7777           # first UDP port (incremented per vid)
        self._debugcount = 0          # frames seen by bufferCallBack
    def addVidOverPCM(self,vidoverPCM,diagnostics=True):
        '''Attach a VidOverPCM mapping and create one MpegTS output per video.

        :type vidoverPCM: VidOverPCM.VidOverPCM
        :param diagnostics: forwarded to each MpegTS instance
        '''
        self.vidOverPCM = vidoverPCM
        udp_port = self.dstport
        for vid in self.vidOverPCM.vidsPerXidml:
            self.mpegTS[vid] = MpegTS.MpegTS(udpport=udp_port,ipaddress=self.dstip,name=vid)
            self.mpegTS[vid].diagnostics = diagnostics
            if self.logtofile:
                # vid names can contain '/', which is illegal in filenames.
                sanitisedFname = vid.replace("/","_")
                # NOTE(review): "%Y%m%d%H%S" skips minutes — presumably
                # "%Y%m%d%H%M%S" was intended; confirm before changing.
                self.mpegTS[vid]._dumpfname = "{}_{}.ts".format(sanitisedFname,datetime.datetime.now().strftime("%Y%m%d%H%S"))
            udp_port += 1
    def logSummary(self):
        """Log one line per video stream describing its destination."""
        # NOTE(review): ret_str is never used or returned — dead variable.
        ret_str = ""
        for vid in self.vidOverPCM.vidsPerXidml:
            logging.info("Transmitting vid {} to address {} on port {}\n".format(self.mpegTS[vid].name,self.mpegTS[vid].dstip,self.mpegTS[vid].dstudp))

    def bufferCallBack(self,timeStamp,pwords,wordCount,puserInfo):
        '''The callback method that is run on every frame'''
        # Split the PCM frame into one buffer per video and forward each
        # to its MpegTS transmitter. Returns 0 to signal success.
        self._debugcount += 1
        #print "Received frame count {} wordcount = {} time = {}".format(self._debugcount,wordCount,time.clock())
        vid_bufs = self.vidOverPCM.frameToBuffers(pwords[:wordCount])
        for vid,buf in vid_bufs.iteritems():
            #print "Decom frame vid = {}".format(vid)
            #start= time.clock()
            self.mpegTS[vid].addPayload(buf)
            #end= time.clock()
            #logging.info("Start = {} Time To Run = {}".format(start,end-start))
        return 0
|
class Solution:
    def convertToTitle(self, n):
        """
        Convert a positive column number to its Excel column title
        (1 -> 'A', 26 -> 'Z', 27 -> 'AA', 703 -> 'AAA').

        :type n: int
        :rtype: str

        Excel titles form a *bijective* base-26 system: digits run A=1
        through Z=26 with no zero, so subtract 1 before each divmod to
        map the 1..26 digit range onto 0..25. This also removes the
        original's leftover debug prints and its fragile ``n / 26 > 1``
        float-division loop condition.
        """
        letters = []
        while n > 0:
            n, rem = divmod(n - 1, 26)
            letters.append(chr(rem + ord('A')))
        # Digits were produced least-significant first.
        return ''.join(reversed(letters))
|
pkoutsias/SickRage | sickbeard/browser.py | Python | gpl-3.0 | 4,389 | 0.002278 | # coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io/
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import os
import string
from sickbeard import logger
from sickrage.helper.encoding import ek
# adapted from http://stackoverflow.com/questions/827371/is-there-a-way-to-list-all-the-available-drive-letters-in-python/827490
def getWinDrives():
    """ Return list of detected drives """
    # Windows-only: GetLogicalDrives returns a bitmask where bit i set
    # means drive letter chr(ord('A') + i) exists.
    assert os.name == 'nt'
    from ctypes import windll

    bitmask = windll.kernel32.GetLogicalDrives()  # @UndefinedVariable
    return [letter for bit, letter in enumerate(string.uppercase)
            if bitmask & (1 << bit)]
def getFileList(path, includeFiles):
    """List entries of ``path`` as dicts with 'name' and 'path' keys.

    Files additionally get ``'isFile': True`` and are skipped entirely
    unless ``includeFiles`` is true. Well-known system/hidden names are
    pruned so users can't wander into dangerous locations.
    """
    # Case-insensitive prune list: Windows system dirs, OS X metadata
    # dirs, and .git.
    hidden = frozenset(
        ['boot', 'bootmgr', 'cache', 'config.msi', 'msocache', 'recovery',
         '$recycle.bin', 'recycler', 'system volume information',
         'temporary internet files'] +  # windows specific
        ['.fseventd', '.spotlight', '.trashes', '.vol', 'cachedmessages',
         'caches', 'trash'] +          # osx specific
        ['.git']
    )

    entries = []
    for name in ek(os.listdir, path):
        if name.lower() in hidden:
            continue
        full_path = ek(os.path.join, path, name)
        if ek(os.path.isdir, full_path):
            entries.append({'name': name, 'path': full_path})
        elif includeFiles:
            entries.append({'name': name, 'path': full_path, 'isFile': True})
    return entries
def foldersAtPath(path, includeParent=False, includeFiles=False):
    """ Returns a list of dictionaries with the folders contained at the given path
    Give the empty string as the path to list the contents of the root path
    (under Unix this means "/", on Windows this will be a list of drive letters)
    :param includeParent: boolean, include parent dir in list as well
    :param includeFiles: boolean, include files or only directories
    :return: list of folders/files; the first element is a
        ``{'currentPath': ...}`` marker dict
    """
    # walk up the tree until we find a valid path
    while path and not ek(os.path.isdir, path):
        if path == ek(os.path.dirname, path):
            path = ''
            break
        else:
            path = ek(os.path.dirname, path)

    if path == '':
        if os.name == 'nt':
            # Meta-node: list drive letters instead of a directory.
            entries = [{'currentPath': 'Root'}]
            for letter in getWinDrives():
                letter_path = letter + ':\\'
                entries.append({'name': letter_path, 'path': letter_path})
            return entries
        else:
            path = '/'

    # fix up the path and find the parent
    path = ek(os.path.abspath, ek(os.path.normpath, path))
    parent_path = ek(os.path.dirname, path)

    # if we're at the root then the next step is the meta-node showing our drive letters
    if path == parent_path and os.name == 'nt':
        parent_path = ''

    try:
        file_list = getFileList(path, includeFiles)
    except OSError as e:
        logger.log('Unable to open %s: %s / %s' % (path, repr(e), str(e)), logger.WARNING)
        file_list = getFileList(parent_path, includeFiles)

    # Sort case-insensitively by entry name. BUG FIX: the original cmp
    # compared x['name'] against y['path'] (mismatched keys), yielding an
    # inconsistent ordering; a key= sort on 'name' is also py2/py3-safe.
    file_list.sort(key=lambda entry: ek(os.path.basename, entry['name']).lower())

    entries = [{'currentPath': path}]
    if includeParent and parent_path != path:
        entries.append({'name': '..', 'path': parent_path})
    entries.extend(file_list)
    return entries
okusama27/kametwi | kametwi/twitter.py | Python | apache-2.0 | 3,518 | 0.008243 | from requests_oauthlib import OAuth1Session
import argparse
import json
from datetime import datetime
#import dateutil.tz
import pytz
import logging
import sys
from kametwi.settings import Settings
"""
This is a command line tool of watching twitter timelines
"""
__status__ = "production"
__date__ = "11 Apr 2015"
logger = logging.getLogger(__name__)
# Set REST API URL
HOME_TIME_LINE_URL = 'https://api.twitter.com/1.1/statuses/home_timeline.json'
USER_TIME_LINE_URL = 'https://api.twitter.com/1.1/statuses/user_timeline.json'
POST_STATUS_UPDATE = 'https://api.twitter.com/1.1/statuses/update.json'
# Set twitter keys
CK = Settings.TWITTER_KEYS['CK']
CS = Settings.TWITTER_KEYS['CS']
AT = Settings.TWITTER_KEYS['AT']
AS = Settings.TWITTER_KEYS['AS']
# Set timezone
tz_tokyo = pytz.timezone(Settings.TIME_ZONE)
def main():
    """Parse the command line and dispatch to the matching twitter action.

    Commands: ``home`` (default), ``timeline`` (needs --screen_name),
    ``tweet`` (needs --tweet). Unknown commands fall back to the home
    timeline.
    """
    # Get args
    parser = argparse.ArgumentParser(description='twitter command line tool')
    parser.add_argument('command')
    parser.add_argument('-s', '--screen_name')
    parser.add_argument('-t', '--tweet')
    parser.add_argument('-v') # Display the request body
    parser.add_argument('--version', action='version', version='%(prog)s 2.0')
    args = parser.parse_args()
    # OAuth session built from the module-level consumer/access keys.
    oauth = OAuth1Session(CK, CS, AT, AS)
    if args.command == "home":
        show_home_timeline(oauth)
    elif args.command == "timeline":
        show_user_timeline(oauth,args)
    elif args.command == "tweet":
        post_tweet(oauth,args)
    else:
        # Unrecognised command: default to the home timeline.
        show_home_timeline(oauth)
# Get someone's timeline
def show_user_timeline(oauth, args):
    """Fetch and print a user's timeline, oldest tweet first.

    :param oauth: an authenticated OAuth1Session
    :param args: parsed argparse namespace; ``args.screen_name`` selects
        the user whose timeline is shown
    """
    params = {"screen_name": args.screen_name}
    req = oauth.get(USER_TIME_LINE_URL, params=params)
    # Check response. BUG FIX: the original fell through after printing
    # the error and hit ``timeline.reverse()`` with ``timeline`` unbound
    # (NameError); return early instead.
    if req.status_code != 200:
        print("Error: %d" % req.status_code)
        return
    timeline = json.loads(req.text)
    timeline.reverse()  # API returns newest first; show oldest first
    # display timeline
    for tweet in timeline:
        # created_at is always UTC, e.g. "Sat Apr 11 06:10:12 +0000 2015"
        dt = datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')
        created_at = tz_tokyo.fromutc(dt).strftime('%Y-%m-%d %H:%M:%S')
        print('[{created_at}] {message}'.format(created_at=created_at,
                                                message=tweet["text"]))
def show_home_timeline(oauth):
    """Fetch and print the authenticated user's home timeline, oldest first.

    :param oauth: an authenticated OAuth1Session
    """
    params = {}
    req = oauth.get(HOME_TIME_LINE_URL, params=params)
    # Check response. BUG FIX: the original fell through after printing
    # the error and hit ``timeline.reverse()`` with ``timeline`` unbound
    # (NameError); return early instead.
    if req.status_code != 200:
        print("Error: %d" % req.status_code)
        return
    timeline = json.loads(req.text)
    timeline.reverse()  # API returns newest first; show oldest first
    # display timeline
    for tweet in timeline:
        # i.g. Sat Apr 11 06:10:12 +0000 2015 (always UTC)
        dt = datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')
        created_at = tz_tokyo.fromutc(dt).strftime('%Y-%m-%d %H:%M:%S')
        print('[{created_at}] @{screen_name}: {message}'.format(
            created_at=created_at,
            screen_name=tweet["user"]["screen_name"],
            message=tweet["text"]))
def post_tweet(oauth, args):
    """Post ``args.tweet`` as a new status update and report the outcome.

    :param oauth: an authenticated OAuth1Session
    :param args: parsed argparse namespace; ``args.tweet`` is the text
    """
    params = {"status": args.tweet}
    req = oauth.post(POST_STATUS_UPDATE, params=params)
    # Check response
    if req.status_code == 200:
        print("OK")
    else:
        # BUG FIX: the original passed a positional argument to a *named*
        # placeholder ("{status_code}".format(req.status_code)), which
        # raises KeyError; bind the name explicitly.
        print("Error: {status_code}".format(status_code=req.status_code))
# main flow: run only when executed as a script; main() returns None, so
# sys.exit(None) exits with status 0.
if __name__ == '__main__':
    sys.exit(main())
Azure/azure-sdk-for-python | sdk/keyvault/azure-keyvault-secrets/azure/keyvault/secrets/_shared/http_challenge_cache.py | Python | mit | 2,520 | 0.001587 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import threading
try:
import urllib.parse as parse
except ImportError:
import urlparse as parse # type: ignore
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
# pylint: disable=unused-import
from typing import Dict
from .http_challenge import HttpChallenge
# Process-wide cache of auth challenges, keyed by the normalized netloc
# produced by _get_cache_key(); guarded by _lock for thread safety.
_cache = {}  # type: Dict[str, HttpChallenge]
_lock = threading.Lock()
def get_challenge_for_url(url):
    """Return the cached challenge for *url*, or None if none is cached.

    :param url: the URL the challenge is cached for.
    :rtype: HttpBearerChallenge
    """
    if not url:
        raise ValueError("URL cannot be None")
    # _get_cache_key is a pure function, so computing it inside the lock
    # is safe and keeps the lookup atomic.
    with _lock:
        return _cache.get(_get_cache_key(url))
def _get_cache_key( | url):
"""Use the URL's netloc as cache key except when the URL specifies the default port for its scheme. In that case
use the netloc without the port. That is to say, https://foo.bar and https://foo.bar:443 are considered equivalent.
This equivalency prevents an unnecessary challenge when using Key Vault's paging API. The Key Vault client doesn't
specify ports, but Key Vault's next page links do, so a redundant challenge would otherwise be executed when the
client requests the next page."""
parsed = parse.urlparse(url)
if parsed.scheme == "https" and parsed.port == 443:
return parsed.netloc[:-4]
return parsed.netloc
def remove_challenge_for_url(url):
    """ Removes the cached challenge for the specified URL.
    :param url: the URL for which to remove the cached challenge """
    if not url:
        raise ValueError("URL cannot be empty")
    # CONSISTENCY FIX: index by the same normalized key that
    # get_challenge_for_url() uses. The original deleted by the raw
    # netloc, which never matches entries for explicit ":443" URLs.
    key = _get_cache_key(url)
    with _lock:
        del _cache[key]
def set_challenge_for_url(url, challenge):
    """ Caches the challenge for the specified URL.
    :param url: the URL for which to cache the challenge
    :param challenge: the challenge to cache """
    if not url:
        raise ValueError("URL cannot be empty")
    if not challenge:
        raise ValueError("Challenge cannot be empty")
    src_url = parse.urlparse(url)
    if src_url.netloc != challenge.source_authority:
        raise ValueError("Source URL and Challenge URL do not match")
    # CONSISTENCY FIX: store under the normalized key that
    # get_challenge_for_url() looks up, so "https://host" and
    # "https://host:443" share one cache entry. The original stored the
    # raw netloc, which lookups for the other spelling could never find.
    with _lock:
        _cache[_get_cache_key(url)] = challenge
def clear():
    """ Clears the cache. """
    # Drop every cached challenge; thread-safe via the module lock.
    with _lock:
        _cache.clear()
|
karkhaz/tuscan | toolchains/install_bootstrap/android/setup.py | Python | apache-2.0 | 2,742 | 0 | #!/usr/bin/python3
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Arch Linux container for building all dependencies of all Arch Linux
# packages.
from utilities import log, timestamp, run_cmd, recursive_chown
import os
import os.path
import shutil
import subprocess
def toolchain_specific_setup(args):
    """Download the Android NDK r10e and build a standalone ARM toolchain.

    Installs the toolchain into /sysroot, fixes ownership/permissions,
    and copies the result into /toolchain_root. ``args`` is currently
    unused. Relies on the project helpers log/run_cmd/recursive_chown.
    """
    log("info", "Running android-specific setup")
    if not os.path.isdir("/sysroot"):
        os.mkdir("/sysroot")
    recursive_chown("/sysroot")
    # wget and curl output unsuitable progress bars even when not
    # connected to a TTY. Turn them off.
    with open("/etc/wgetrc", "a") as f:
        print("verbose = off", file=f)
    with open("/etc/.curlrc", "a") as f:
        print("silent", file=f)
        print("show-error", file=f)
    log("info", "Downloading & unpacking NDK")
    os.chdir("/home/tuscan")
    # The NDK ships as a self-extracting binary; download, mark it
    # executable and run it to unpack into android-ndk-r10e/.
    setup_file = "/home/tuscan/ndk.bin"
    cmd = ("wget -O %s"
           " http://dl.google.com/android/ndk/android-"
           "ndk-r10e-linux-x86_64.bin" % (setup_file))
    run_cmd(cmd)
    cmd = "chmod +x " + setup_file
    run_cmd(cmd)
    run_cmd(setup_file, output=False)
    log("info", "Setting up toolchain")
    # Build a standalone ARM toolchain for android-21 into /sysroot.
    cmd = ("/home/tuscan/android-ndk-r10e/build/tools/"
           "make-standalone-toolchain.sh"
           " --arch=arm --platform=android-21 "
           " --install-dir=" + "/sysroot")
    run_cmd(cmd)
    cmd = "chown -R tuscan: " + "/sysroot"
    run_cmd(cmd, as_root=True)
    cmd = "chown -R tuscan: /home/tuscan/android-ndk-r10e"
    run_cmd(cmd, as_root=True)
    # Make the compiler driver binaries world-executable.
    bindirs = [
        "/sysroot/bin",
        "/sysroot/libexec/gcc/arm-linux-androideabi/4.8"
    ]
    for d in bindirs:
        for f in os.listdir(d):
            f = os.path.join(d, f)
            cmd = "chmod a+rx %s" % f
            run_cmd(cmd, as_root=True)
    # Mirror the finished sysroot into /toolchain_root.
    for f in os.listdir("/sysroot"):
        if os.path.isdir(os.path.join("/sysroot", f)):
            shutil.copytree(os.path.join("/sysroot", f),
                    os.path.join("/toolchain_root", f))
        elif os.path.isfile(os.path.join("/sysroot", f)):
            shutil.copy(os.path.join("/sysroot", f), "/toolchain_root")
    recursive_chown("/toolchain_root")
|
joke2k/faker | faker/providers/date_time/id_ID/__init__.py | Python | mit | 861 | 0 | from .. import Provider as DateTime | Provider
class Provider(DateTimeProvider):
    def day_of_week(self) -> str:
        """Return an Indonesian day name for a random date.

        BUG FIX: ``strftime('%w')`` yields the weekday as a digit where
        0 is *Sunday* and 6 is *Saturday*; the original table started at
        Senin (Monday), so every day name was off by one.
        """
        day = self.date("%w")
        DAY_NAMES = {
            "0": "Minggu",
            "1": "Senin",
            "2": "Selasa",
            "3": "Rabu",
            "4": "Kamis",
            "5": "Jumat",
            "6": "Sabtu",
        }
        return DAY_NAMES[day]

    def month_name(self) -> str:
        """Return an Indonesian month name for a random date.

        ``self.month()`` yields a zero-padded "01".."12" string.
        """
        month = self.month()
        MONTH_NAMES = {
            "01": "Januari",
            "02": "Februari",
            "03": "Maret",
            "04": "April",
            "05": "Mei",
            "06": "Juni",
            "07": "Juli",
            "08": "Agustus",
            "09": "September",
            "10": "Oktober",
            "11": "November",
            "12": "Desember",
        }
        return MONTH_NAMES[month]
|
cbare/Etudes | python/binary_tree.py | Python | apache-2.0 | 5,399 | 0.003704 | """
Binary Tree
"""
import sys,random
class Tree:
    """A node in a binary search tree.

    ``item`` is the payload; ``left``/``right`` are child Tree nodes
    (or None for a missing child).
    """

    def __init__(self, item, left=None, right=None):
        self.item = item
        self.left = left
        self.right = right

    def __repr__(self):
        return "Tree({})".format(self.item)
def insert(tree, a):
    """
    Insert an item into the tree and return the (possibly new) root.
    This is functional style, so usage is:
        tree = insert(tree, 'foo')

    Duplicates (a >= item) go into the right subtree.
    """
    if tree is None:
        return Tree(a)
    if a < tree.item:
        tree.left = insert(tree.left, a)
    else:
        tree.right = insert(tree.right, a)
    return tree
def delete(tree, a):
    """
    Delete the first node whose item equals ``a`` and return the new root.

    This is quirky 'cause we can usually get away with
    throwing away the return value except in the case
    where we delete the root node. Not ideal.

    Raises KeyError if ``a`` is not in the tree (via the while-else).
    """
    root = tree
    parent = None
    # Iterative search: ``tree`` walks down, ``parent`` trails one level.
    while tree is not None:
        if tree.item == a:
            if tree.left and tree.right:
                ## replace deleted node with min node in right subtree
                repl = tree.right
                if repl.left:
                    # Walk to the leftmost (minimum) node of the right
                    # subtree, then unlink it from its parent.
                    while repl.left:
                        repl_parent = repl
                        repl = repl.left
                    if repl.right:
                        repl_parent.left = repl.right
                    else:
                        repl_parent.left = None
                # Graft the deleted node's children onto the replacement.
                repl.left = tree.left
                if tree.right != repl:
                    repl.right = tree.right
            elif tree.left:
                ## replace deleted node with its left child
                repl = tree.left
            elif tree.right:
                ## replace deleted node with its right child
                repl = tree.right
            else:
                ## deleting a leaf node
                repl = None
            # Splice the replacement into the parent (or replace the root).
            if parent:
                if a < parent.item:
                    parent.left = repl
                else:
                    parent.right = repl
            else:
                ## replace a root node, perhaps with None
                root = repl
            break
        elif a < tree.item:
            parent = tree
            tree = tree.left
        else:
            parent = tree
            tree = tree.right
    else:
        # while-else: loop exhausted without break, i.e. key not found.
        raise KeyError("Key %s not found" % str(a))
    return root
def print_in_order(tree):
    """Write the tree to stdout as nested parens, e.g. ``((1)2(3))``.

    An empty subtree prints nothing, so a leaf renders as ``(item)``.
    """
    if tree is None:
        return
    sys.stdout.write('(')
    print_in_order(tree.left)
    sys.stdout.write(str(tree.item))
    print_in_order(tree.right)
    sys.stdout.write(')')
def traverse_in_order(tree):
    """
    Generator that yields items in ascending (in-order) order.

    Iterative version using an explicit stack instead of the recursive
    ``yield from`` formulation; yields nothing for an empty tree.
    """
    stack = []
    node = tree
    while stack or node is not None:
        while node is not None:
            stack.append(node)
            node = node.left
        node = stack.pop()
        yield node.item
        node = node.right
def min_node(tree):
    """
    Return the minimum node of the tree or raise a ValueError
    if the tree is empty.

    The minimum is the leftmost node; found iteratively here.
    """
    if tree is None:
        raise ValueError("Can't take min of empty tree")
    node = tree
    while node.left is not None:
        node = node.left
    return node
def min(tree):
    """
    Return the minimum item in the tree or raise a ValueError
    if the tree is empty.

    Note: intentionally shadows the builtin ``min`` (kept for
    compatibility with the test script in this module).
    """
    # min_node() raises ValueError on an empty tree and otherwise always
    # returns a node, so the original's ``if mt else None`` branch was
    # dead code.
    return min_node(tree).item
def test(tree):
if tree is None:
return True
result = True
if tree.left:
assert tree.left.item < tree.item
result &= result and test(tree.left)
if tree.right:
assert tree.right.item >= tree.item
result &= result and test(tree.right)
return result
# --- ad-hoc smoke tests ---------------------------------------------------
# Build a random 20-node tree, verify the BST invariant, and print it.
tree = None
for n in random.sample(range(20), 20):
    tree = insert(tree, n)
print("test(tree) =", test(tree))
print(", ".join(str(item) for item in traverse_in_order(tree)))
print('min(tree) =', min(tree))
print('\n')
print_in_order(tree)
print('\n')
# Delete the only node (root) of a one-node tree.
t = Tree(1)
t = delete(t, 1)
print_in_order(t)
print('\n')
# Delete a leaf that is a left child.
t = Tree(12, left=Tree(5))
print_in_order(t)
print('\n')
t = delete(t, 5)
print_in_order(t)
print('\n')
# Delete a left leaf when a right child also exists.
t = Tree(12, left=Tree(5), right=Tree(16))
print_in_order(t)
print('\n')
t = delete(t, 5)
print_in_order(t)
print('\n')
# Delete a right leaf.
t = Tree(12, left=Tree(5), right=Tree(16))
print_in_order(t)
print('\n')
t = delete(t, 16)
print_in_order(t)
print('\n')
# Delete a root with two children.
t = Tree(12, left=Tree(5), right=Tree(16))
print_in_order(t)
print('\n')
t = delete(t, 12)
print_in_order(t)
print('\n')
# Delete a root with only a left child.
t = Tree(12, left=Tree(5))
print_in_order(t)
print('\n')
t = delete(t, 12)
print_in_order(t)
print('\n')
# Delete an interior node with a bushy right subtree.
t = Tree(3, right=Tree(12, left=Tree(5), right=Tree(16, Tree(15),Tree(17))))
print_in_order(t)
print('\n')
t = delete(t, 12)
print_in_order(t)
print('\n')
# Delete the root when the right subtree has both children.
t = Tree(12, left=Tree(5), right=Tree(16, Tree(15),Tree(17)))
print_in_order(t)
print('\n')
t = delete(t, 12)
print_in_order(t)
print('\n')
# Delete the root when the right subtree has no left child.
t = Tree(12, left=Tree(5), right=Tree(16, right=Tree(17)))
print_in_order(t)
print('\n')
t = delete(t, 12)
print_in_order(t)
print('\n')
# Delete the root of a deeper tree (replacement has a right chain).
t = Tree(12, left=Tree(5), right=Tree(26, Tree(16, right=Tree(17, right=Tree(18))), Tree(27)))
print_in_order(t)
print('\n')
t = delete(t, 12)
print('-'*80)
# Randomized torture test: interleave inserts and deletes until empty.
for i in range(100):
    tree = None
    for n in random.sample(range(100), 20):
        tree = insert(tree, n)
    while tree:
        if random.random() > 2.0/3.0:
            tree = insert(tree, random.randint(0,99))
        else:
            tree = delete(tree, random.choice(list(traverse_in_order(tree))))
    print_in_order(tree)
    print("\n")
|
davegoopot/rube-mc-pi | rube_mc_pi/scratchplug.py | Python | gpl-2.0 | 3,671 | 0.00681 | """
Adaptor for scratch using the scratchpy interface
To function as a source, the scratch program should just broadcast a block state
e.g. broadcast 41,0
To function as a target, the scratch program should listen for broadcasts saying
either BLOCK_PRESENT or BLOCK_ABSENT. Implementation of individual block types
has not yet been implemented
The JSON config looks like this:
{ "source": {
"type": "scratchplug",
"server_address": "localhost",
"server_port": 42001
},
"target": {
"type": "scratchplug",
"server_address": "localhost",
"server_port": 42001
}
}
"""
import rube_mc_pi.mcpi.block as block
from multiprocessing import Process, Queue
from Queue import Empty
import rube_mc_pi.rube as rube
import scratch as s | cratch_ex
class ScratchplugSource(rube.Source): #pylint: disable=R0903
"""Wait for a broadcast from scratch showing that the block has changed
The standard scratch.receive() method blocks until a message is received.
This implementation wraps the call in a multiprocessing loop to give up
if nothing is rece | ived before a time out
"""
def __init__(self, attribs):
super(ScratchplugSource, self).__init__()
self.scratch_link = ScratchLink(attribs)
self.last_block_state = block.AIR
self.from_scratch = None
def receive_from_scratch(self, queue):
"""Look for a "x,y" broadcast and convert to a block.
If a broadcast is received that doesn't parse into x,y then leave the
state unchanged.
"""
self.from_scratch = None
received = self.scratch_link.scratch_connection.receive()[1]
try:
id_, data = received.split(",")
_block = block.Block(int(id_), int(data))
except ValueError:
#Assume that the broadcast wasn't intended as a block change
print("Unparsable broadcast:" + received)
_block = None
if _block:
queue.put(_block)
def poll_state(self):
"""Assumes that scratch will broadcast "x,y" to represent the
block state.
"""
queue = Queue()
proc = Process(target=self.receive_from_scratch, args=(queue,))
proc.start()
timout_secs = 3
proc.join(timout_secs)
if proc.is_alive():
proc.terminate()
proc.join()
try:
_block = queue.get_nowait()
self.last_block_state = _block
except Empty:
pass
return self.last_block_state
class ScratchplugTarget(rube.Target): #pylint: disable=R0903
"""Send a broadcast to scratch with either "BLOCK_PRESENT" or "BLOCK_ABSENT"
"""
def __init__(self, attribs):
super(ScratchplugTarget, self).__init__()
self.scratch_link = ScratchLink(attribs)
def update_state(self, block_):
if block_ != block.AIR:
self.scratch_link.scratch_connection.broadcast("BLOCK_PRESENT")
else:
self.scratch_link.scratch_connection.broadcast("BLOCK_ABSENT")
class ScratchLink(object):
"""
Responsible for storing the state of the link to a server
"""
def __init__(self, attribs):
self.server_address = attribs["server_address"]
self.server_port = attribs.get("server_port", 42001)
self.scratch_connection = scratch_ex.Scratch(self.server_address,
port=self.server_port)
|
openstack/murano | murano/tests/unit/packages/versions/test_mpl_v1.py | Python | apache-2.0 | 1,639 | 0 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import imghdr
import os
import murano.packages.load_utils as load_utils
import murano.tests.unit.base as test_base
class TestMplV1(test_base.MuranoTestCase):
def test_supplier_info_load(self):
pack | age_dir = os.path.abspath(
| os.path.join(__file__, '../../test_packages/test.mpl.v1.app')
)
package = load_utils.load_from_dir(package_dir)
self.assertIsNotNone(package.supplier)
self.assertEqual('Supplier Name', package.supplier['Name'])
self.assertEqual({'Link': 'http://example.com',
'Text': 'Example Company'},
package.supplier['CompanyUrl'])
self.assertEqual(
'Company summary goes here',
package.supplier['Summary']
)
self.assertEqual(
'Marked up company description goes here',
package.supplier['Description']
)
self.assertEqual('test_supplier_logo.png', package.supplier['Logo'])
self.assertEqual('png', imghdr.what('', package.supplier_logo))
|
amsehili/auditok | tests/test_AudioSource.py | Python | mit | 31,465 | 0 | """
@author: Amine Sehili <amine.sehili@gmail.com>
"""
from array import array
import unittest
from genty import genty, genty_dataset
from auditok.io import (
AudioParameterError,
BufferAudioSource,
RawAudioSource,
WaveAudioSource,
)
from auditok.signal import FORMAT
from test_util import PURE_TONE_DICT, _sample_generator
def audio_source_read_all_gen(audio_source, size=None):
if size is None:
size = int(audio_source.sr * 0.1) # 100ms
while True:
data = audio_source.read(size)
if data is None:
break
yield data
@genty
class TestAudioSource(unittest.TestCase):
# TODO when use_channel is None, return samples from all channels
@genty_dataset(
mono=("mono_400Hz", (400,)),
multichannel=("3channel_400-800-1600Hz", (400, 800, 1600)),
)
def test_BufferAudioSource_read_all(self, file_suffix, frequencies):
file = "tests/data/test_16KHZ_{}.raw".format(file_suffix)
with open(file, "rb") as fp:
expected = fp.read()
channels = len(frequencies)
audio_source = BufferAudioSource(expected, 16000, 2, channels)
audio_source.open()
data = audio_source.read(None)
self.assertEqual(data, expected)
audio_source.rewind()
data = audio_source.read(-10)
self.assertEqual(data, expected)
audio_source.close()
@genty_dataset(
mono=("mono_400Hz", (400,)),
multichannel=("3channel_400-800-1600Hz", (400, 800, 1600)),
)
def test_RawAudioSource(self, file_suffix, frequencies):
file = "tests/data/test_16KHZ_{}.raw".format(file_suffix)
channels = len(frequencies)
audio_source = RawAudioSource(file, 16000, 2, channels)
audio_source.open()
data_read_all = b"".join(audio_source_read_all_gen(audio_source))
audio_source.close()
mono_channels = [PURE_TONE_DICT[freq] for freq in frequencies]
fmt = FORMAT[audio_source.sample_width]
expected = array(fmt, _sample_generator(*mono_channels)).tobytes()
self.assertEqual(data_read_all, expected)
# assert read all data with None
audio_source = RawAudioSource(file, 16000, 2, channels)
audio_source.open()
data_read_all = audio_source.read(None)
audio_source.close()
self.assertEqual(data_read_all, expected)
# assert read all data with a negative size
audio_source = RawAudioSource(file, 16000, 2, channels)
audio_source.open()
data_read_all = audio_source.read(-10)
audio_source.close()
self.assertEqual(data_read_all, expected)
@genty_dataset(
mono=("mono_400Hz", (400,)),
multichannel=("3channel_400-800-1600Hz", (400, 800, 1600)),
)
def test_WaveAudioSource(self, file_suffix, frequencies):
file = "tests/data/test_16KHZ_{}.wav".format(file_suffix)
audio_source = WaveAudioSource(file)
audio_source.open()
data = b"".join(audio_source_read_all_gen(audio_source))
audio_source.close()
mono_channels = [PURE_TONE_DICT[freq] for freq in frequencies]
fmt = FORMAT[audio_source.sample_width]
expected = array(fmt, _sample_generator(*mono_channels)).tobytes()
self.assertEqual(data, expected)
# assert read all data with None
audio_source = WaveAudioSource(file)
audio_source.open()
data_read_all = audio_source.read(None)
audio_source.close()
self.assertEqual(data_read_all, expected)
# assert read all data with a negative size
audio_source = WaveAu | dioSource(file)
audio_source.open()
data_read_all = audio_source.read(-10)
audio_source.close()
self.assertEqual(data_read_all, expected)
@genty
class TestBufferAudioSource_SR10_SW1_CH1(unittest.TestCase):
def setUp(self):
self.data | = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ012345"
self.audio_source = BufferAudioSource(
data=self.data, sampling_rate=10, sample_width=1, channels=1
)
self.audio_source.open()
def tearDown(self):
self.audio_source.close()
def test_sr10_sw1_ch1_read_1(self):
block = self.audio_source.read(1)
exp = b"A"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
def test_sr10_sw1_ch1_read_6(self):
block = self.audio_source.read(6)
exp = b"ABCDEF"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
def test_sr10_sw1_ch1_read_multiple(self):
block = self.audio_source.read(1)
exp = b"A"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
block = self.audio_source.read(6)
exp = b"BCDEFG"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
block = self.audio_source.read(13)
exp = b"HIJKLMNOPQRST"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
block = self.audio_source.read(9999)
exp = b"UVWXYZ012345"
self.assertEqual(
block,
exp,
msg="wrong block, expected: {}, found: {} ".format(exp, block),
)
def test_sr10_sw1_ch1_read_all(self):
block = self.audio_source.read(9999)
self.assertEqual(
block,
self.data,
msg="wrong block, expected: {}, found: {} ".format(
self.data, block
),
)
block = self.audio_source.read(1)
self.assertEqual(
block,
None,
msg="wrong block, expected: {}, found: {} ".format(None, block),
)
def test_sr10_sw1_ch1_sampling_rate(self):
srate = self.audio_source.sampling_rate
self.assertEqual(
srate,
10,
msg="wrong sampling rate, expected: 10, found: {0} ".format(srate),
)
def test_sr10_sw1_ch1_sample_width(self):
swidth = self.audio_source.sample_width
self.assertEqual(
swidth,
1,
msg="wrong sample width, expected: 1, found: {0} ".format(swidth),
)
def test_sr10_sw1_ch1_channels(self):
channels = self.audio_source.channels
self.assertEqual(
channels,
1,
msg="wrong number of channels, expected: 1, found: {0} ".format(
channels
),
)
@genty_dataset(
empty=([], 0, 0, 0),
zero=([0], 0, 0, 0),
five=([5], 5, 0.5, 500),
multiple=([5, 20], 25, 2.5, 2500),
)
def test_position(
self, block_sizes, expected_sample, expected_second, expected_ms
):
for block_size in block_sizes:
self.audio_source.read(block_size)
position = self.audio_source.position
self.assertEqual(
position,
expected_sample,
msg="wrong stream position, expected: {}, found: {}".format(
expected_sample, position
),
)
position_s = self.audio_source.position_s
self.assertEqual(
position_s,
expected_second,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_second, position_s
),
)
position_ms = self.audio_source.position_ms
self.assertEqual(
position_ms,
expected_ms,
msg="wrong stream position_s, expected: {}, found: {}".format(
expected_ms, position_ms
),
)
@genty_dataset(
zero=(0, 0, 0, 0),
one=(1, 1, 0.1, 100),
ten=(10, 10, 1, 1000),
negative_1=(-1, 31, 3.1, 3100),
nega |
seba3c/scamera | notifications/migrations/0002_auto_20161014_0113.py | Python | gpl-3.0 | 656 | 0.001524 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-14 01:13
from __future__ import unicode_li | terals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0001_initial'),
| ]
operations = [
migrations.AddField(
model_name='telegrambot',
name='module_name',
field=models.CharField(max_length=25, null=True),
),
migrations.AlterField(
model_name='telegrambot',
name='name',
field=models.CharField(max_length=25, primary_key=True, serialize=False),
),
]
|
labordoc/labordoc-next | modules/bibrank/lib/bibrank_tag_based_indexer_unit_tests.py | Python | gpl-2.0 | 1,743 | 0.010327 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the ranking engine."""
__revision__ = "$Id$"
from invenio.importutils import lazy_import
from invenio.testutils import make_test_suite, run_test_suite, InvenioTestCase
bibrank_tag_based_indexer = lazy_import('invenio.bibrank_t | ag_based_indexer')
split_ranges = lazy_import('invenio.bibrank:split_ranges')
class TestListSetOperations(InvenioTestCase):
"""Test list set operations."""
def test_union_dicts(self):
"""bibrank tag based indexer - union dicts"""
self.assertEqual({1: 5, 2: 6, 3: 9, 4: 10, 10: 1}, bibrank_tag_based_indexer.union_dicts({1: 5, 2: 6, 3: 9}, {3:9, 4:10, 10: 1}))
def test_split_ranges(self):
"""bibrank tag based indexer - split ranges"""
s | elf.assertEqual([[0, 500], [600, 1000]], split_ranges("0-500,600-1000"))
TEST_SUITE = make_test_suite(TestListSetOperations,)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
|
NSLS-II-XPD/ipython_ophyd | profile_collection_germ/acceptance_tests/01-count-pe1.py | Python | bsd-2-clause | 109 | 0 | from bluesky.plans | import count
from bluesky.callbacks import LiveTable
RE(count([pe1]), LiveTable([pe1 | ]))
|
jonathon-love/snapcraft | snapcraft/_schema.py | Python | gpl-3.0 | 2,896 | 0.000345 | #!/usr/bin/python3
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import jsonschema
import yaml
from snapcraft.internal import common
class SnapcraftSchemaError(Exception):
@property
def message(self):
return self._message
def __init__(self, message):
self._message = message
class Validator:
def __init__(self, snapcraft_yaml=None):
"""Create a validation instance for snapcraft_yaml."""
self._snapcraft = snapcraft_yaml if snapcraft_yaml else {}
self._load_schema()
@property
def schema(self):
"""Return all schema properties."""
return self._schema['properties'].copy()
@property
def part_schema(self):
"""Return part-specific schema properties."""
sub = self.schema['parts']['patternProperties']
properties = sub['^(?!plugins$)[a-z0-9][a-z0-9+-\/]*$']['properties']
return properties
def _load_schema(self):
schema_file = os.path.abspath(os.path.join(
common.get_schemadir(), 'snapcraft.yaml'))
try:
with open(schema_file) as fp:
self._schema = yaml.load(fp)
except FileNotFoundError:
raise SnapcraftSchemaError(
'snapcraft validation file is missing from installation path')
def validate(self):
format_check = jsonschema.FormatChecker()
try:
jsonschema.validate(
self._snapcr | aft, self._schema, format_checker=format_check)
except jsonschema.ValidationError as e:
messages = [e.message]
| path = []
while e.absolute_path:
element = e.absolute_path.popleft()
# assume numbers are indices and use 'xxx[123]' notation.
if isinstance(element, int):
path[-1] = '{}[{}]'.format(path[-1], element)
else:
path.append(str(element))
if path:
messages.insert(0, "The '{}' property does not match the "
"required schema:".format('/'.join(path)))
if e.cause:
messages.append('({})'.format(e.cause))
raise SnapcraftSchemaError(' '.join(messages))
|
ewindisch/nova | nova/tests/compute/test_virtapi.py | Python | apache-2.0 | 7,425 | 0 | # Co | pyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License | is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import mox
from nova.compute import manager as compute_manager
from nova import context
from nova import db
from nova import exception
from nova.objects import external_event as external_event_obj
from nova import test
from nova.virt import fake
from nova.virt import virtapi
class VirtAPIBaseTest(test.NoDBTestCase, test.APICoverage):
cover_api = virtapi.VirtAPI
def setUp(self):
super(VirtAPIBaseTest, self).setUp()
self.context = context.RequestContext('fake-user', 'fake-project')
self.set_up_virtapi()
def set_up_virtapi(self):
self.virtapi = virtapi.VirtAPI()
def assertExpected(self, method, *args, **kwargs):
self.assertRaises(NotImplementedError,
getattr(self.virtapi, method), self.context,
*args, **kwargs)
def test_instance_update(self):
self.assertExpected('instance_update', 'fake-uuid',
dict(host='foohost'))
def test_provider_fw_rule_get_all(self):
self.assertExpected('provider_fw_rule_get_all')
def test_agent_build_get_by_triple(self):
self.assertExpected('agent_build_get_by_triple',
'fake-hv', 'gnu/hurd', 'fake-arch')
def test_wait_for_instance_event(self):
self.assertExpected('wait_for_instance_event',
'instance', ['event'])
class FakeVirtAPITest(VirtAPIBaseTest):
cover_api = fake.FakeVirtAPI
def set_up_virtapi(self):
self.virtapi = fake.FakeVirtAPI()
def assertExpected(self, method, *args, **kwargs):
if method == 'wait_for_instance_event':
run = False
with self.virtapi.wait_for_instance_event(*args, **kwargs):
run = True
self.assertTrue(run)
return
if method == 'instance_update':
# NOTE(danms): instance_update actually becomes the other variant
# in FakeVirtAPI
db_method = 'instance_update_and_get_original'
else:
db_method = method
self.mox.StubOutWithMock(db, db_method)
if method in ('aggregate_metadata_add', 'aggregate_metadata_delete',
'security_group_rule_get_by_security_group'):
# NOTE(danms): FakeVirtAPI will convert the first argument to
# argument['id'], so expect that in the actual db call
e_args = tuple([args[0]['id']] + list(args[1:]))
elif method == 'security_group_get_by_instance':
e_args = tuple([args[0]['uuid']] + list(args[1:]))
else:
e_args = args
getattr(db, db_method)(self.context, *e_args, **kwargs).AndReturn(
'it worked')
self.mox.ReplayAll()
result = getattr(self.virtapi, method)(self.context, *args, **kwargs)
self.assertEqual(result, 'it worked')
class FakeCompute(object):
def __init__(self):
self.conductor_api = mox.MockAnything()
self.db = mox.MockAnything()
self._events = []
self.instance_events = mock.MagicMock()
self.instance_events.prepare_for_instance_event.side_effect = \
self._prepare_for_instance_event
def _instance_update(self, context, instance_uuid, **kwargs):
# NOTE(danms): Fake this behavior from compute/manager::ComputeManager
return self.conductor_api.instance_update(context,
instance_uuid, kwargs)
def _event_waiter(self):
event = mock.MagicMock()
event.status = 'completed'
return event
def _prepare_for_instance_event(self, instance, event_name):
m = mock.MagicMock()
m.instance = instance
m.event_name = event_name
m.wait.side_effect = self._event_waiter
self._events.append(m)
return m
class ComputeVirtAPITest(VirtAPIBaseTest):
cover_api = compute_manager.ComputeVirtAPI
def set_up_virtapi(self):
self.compute = FakeCompute()
self.virtapi = compute_manager.ComputeVirtAPI(self.compute)
def assertExpected(self, method, *args, **kwargs):
self.mox.StubOutWithMock(self.compute.conductor_api, method)
getattr(self.compute.conductor_api, method)(
self.context, *args, **kwargs).AndReturn('it worked')
self.mox.ReplayAll()
result = getattr(self.virtapi, method)(self.context, *args, **kwargs)
self.assertEqual(result, 'it worked')
def test_wait_for_instance_event(self):
and_i_ran = ''
event_1_tag = external_event_obj.InstanceExternalEvent.make_key(
'event1')
event_2_tag = external_event_obj.InstanceExternalEvent.make_key(
'event2', 'tag')
events = {
'event1': event_1_tag,
('event2', 'tag'): event_2_tag,
}
with self.virtapi.wait_for_instance_event('instance', events.keys()):
and_i_ran = 'I ran so far a-waa-y'
self.assertEqual('I ran so far a-waa-y', and_i_ran)
self.assertEqual(2, len(self.compute._events))
for event in self.compute._events:
self.assertEqual('instance', event.instance)
self.assertIn(event.event_name, events.values())
event.wait.assert_called_once_with()
def test_wait_for_instance_event_failed(self):
def _failer():
event = mock.MagicMock()
event.status = 'failed'
return event
@mock.patch.object(self.virtapi._compute, '_event_waiter', _failer)
def do_test():
with self.virtapi.wait_for_instance_event('instance', ['foo']):
pass
self.assertRaises(exception.NovaException, do_test)
def test_wait_for_instance_event_failed_callback(self):
def _failer():
event = mock.MagicMock()
event.status = 'failed'
return event
@mock.patch.object(self.virtapi._compute, '_event_waiter', _failer)
def do_test():
callback = mock.MagicMock()
with self.virtapi.wait_for_instance_event('instance', ['foo'],
error_callback=callback):
pass
callback.assert_called_with('foo', 'instance')
do_test()
def test_wait_for_instance_event_timeout(self):
class TestException(Exception):
pass
def _failer():
raise TestException()
@mock.patch.object(self.virtapi._compute, '_event_waiter', _failer)
@mock.patch('eventlet.timeout.Timeout')
def do_test(timeout):
with self.virtapi.wait_for_instance_event('instance', ['foo']):
pass
self.assertRaises(TestException, do_test)
|
nareshppts/account_chart_update | currency_rate_update/currency_rate_update.py | Python | agpl-3.0 | 29,434 | 0.00683 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 Camptocamp SA
# @author Nicolas Bessi
# @source JBA and AWST inpiration
# @contributor Grzegorz Grzelak (grzegorz.grzelak@birdglobe.com), Joel Grand-Guillaume
# Copyright (c) 2010 Alexis de Lattre (alexis@via.ecp.fr)
# - ported XML-based webservices (Admin.ch, ECB, PL NBP) to new XML lib
# - rates given by ECB webservice is now correct even when main_cur <> EUR
# - rates given by PL_NBP webservice is now correct even when main_cur <> PLN
# - if company_currency <> CHF, you can now update CHF via Admin.ch webservice
# (same for EUR with ECB webservice and PLN with NBP webservice)
# For more details, see Launchpad bug #645263
# - mecanism to check if rates given by the webservice are "fresh" enough to be
# written in OpenERP ('max_delta_days' parameter for each currency update service)
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
# TODO "nice to have" : restain the list of currencies that can be added for
# a webservice to the list of currencies supported by the Webservice
# TODO : implement max_delta_days for Yahoo webservice
from osv import osv, fields
import time
from datetime import datetime, timedelta
import logging
from tools.translate import _
class Currency_rate_update_service(osv.osv):
"""Class thats tell for wich services wich currencies
have to be updated"""
_name = "currency.rate.update.service"
_description = "Currency Rate Update"
_columns = {
# #list of webservicies the value sould be a class name
'service' : fields.selection(
[
('Admin_ch_getter', 'Admin.ch'),
('ECB_getter', 'European Central Bank'),
('Yahoo_getter', 'Yahoo Finance '),
('PL_NBP_getter', 'Narodowy Bank Polski'), # Added for polish rates
('Banxico_getter', 'Banco de México'), # Added for mexican rates
],
"Webservice to use",
required=True
),
# #list of currency to update
'currency_to_update' : fields.many2many(
'res.currency',
'res_curreny_auto_udate_rel',
'service_id',
'currency_id',
'currency to update with this service',
),
# back ref
'company_id' : fields.many2one(
| 'res.company',
'linked company',
),
# #note fileds that will be used as a logger
'note':fields.text('update notice'),
'max_delta_days': fields.integer('Max delta days', required=True, help="If the time delta between the rate date given by the webservice and the current date exeeds this va | lue, then the currency rate is not updated in OpenERP."),
}
_defaults = {
'max_delta_days': lambda *a: 4,
}
_sql_constraints = [
(
'curr_service_unique',
'unique (service, company_id)',
_('You can use a service one time per company !')
)
]
def _check_max_delta_days(self, cr, uid, ids):
for company in self.read(cr, uid, ids, ['max_delta_days']):
if company['max_delta_days'] >= 0:
continue
else:
return False
return True
_constraints = [
(_check_max_delta_days, "'Max delta days' must be >= 0", ['max_delta_days']),
]
Currency_rate_update_service()
class Currency_rate_update(osv.osv):
"""Class that handle an ir cron call who will
update currencies based on a web url"""
_name = "currency.rate.update"
_description = "Currency Rate Update"
# #dict that represent a cron object
cron = {
'active' : False,
'priority' : 1,
'interval_number' : 1,
'interval_type' : 'weeks',
'nextcall' : time.strftime("%Y-%m-%d %H:%M:%S", (datetime.today() + timedelta(days=1)).timetuple()), # tomorrow same time
'numbercall' :-1,
'doall' : True,
'model' : 'currency.rate.update',
'function' : 'run_currency_update',
'args' : '()',
}
logger = logging.getLogger(__name__)
MOD_NAME = 'currency_rate_update: '
def get_cron_id(self, cr, uid, context):
"""return the updater cron's id. Create one if the cron does not exists """
cron_id = 0
cron_obj = self.pool.get('ir.cron')
try:
# find the cron that send messages
cron_id = cron_obj.search(
cr,
uid,
[
('function', 'ilike', self.cron['function']),
('model', 'ilike', self.cron['model'])
],
context={
'active_test': False
}
)
cron_id = int(cron_id[0])
except Exception, e :
self.logger.info('warning cron not found one will be created')
pass # ignore if the cron is missing cause we are going to create it in db
# the cron does not exists
if not cron_id :
# translate
self.cron['name'] = _('Currency Rate Update')
cron_id = cron_obj.create(cr, uid, self.cron, context)
return cron_id
def save_cron(self, cr, uid, datas, context={}):
"""save the cron config data should be a dict"""
# modify the cron
cron_id = self.get_cron_id(cr, uid, context)
result = self.pool.get('ir.cron').write(cr, uid, [cron_id], datas)
def run_currency_update(self, cr, uid):
"update currency at the given frequence"
factor |
Azure/azure-sdk-for-python | sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2020_09_01/operations/_users_operations.py | Python | mit | 21,208 | 0.004762 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsersOperations(object):
"""UsersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_data_box_edge_device(
self,
device_name, # type: str
resource_group_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.UserList"]
"""Gets all the users registered on a Data Box Edge/Data Box Gateway device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param expand: Specify $expand=details to populate additional fields related to the resource or
Specify $skipToken=:code:`<token>` to populate the next page in the list.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UserList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2020_09_01.models.UserList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UserList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # | type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_data_box_edge_device.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 's | tr'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('UserList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/users'} # type: ignore
def get(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.User"
"""Gets the properties of the specified user.
:param device_name: The device name.
:type device_name: str
:param name: The user name.
:type name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: User, or the result of cls(response)
:rtype: ~azure.mgmt.databoxedge.v2020_09_01.models.User
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.User"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request |
jmacmahon/lumberjack | lumberjack/actions.py | Python | gpl-3.0 | 7,907 | 0 | # -*- coding: utf-8 -*-
#
# This file is part of Lumberjack.
# Copyright 2014 CERN.
#
# Lumberjack is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# Lumberjack is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lumberjack. If not, see <http://www.gnu.org/licenses/>.
"""Provide the ActionQueue class."""
from __future__ import absolute_import
from elasticsearch import ElasticsearchException, TransportError
from elasticsearch.helpers import bulk
from threading import Thread, Event, Lock
from json import dumps
import traceback
import logging
from copy import deepcopy
class ActionQueue(Thread):
    """Hold a queue of actions and a thread to bulk-perform them.

    This is instantiated automatically by the ``lumberjack.Lumberjack`` object.
    It will keep a queue of indexing actions to be performed in Elasticsearch,
    and perform them bulk ('flush') when one of three things happens:

    1. It has waited ``interval`` seconds without flushing, or
    2. The length of the queue has exceeded ``max_queue_length``, or
    3. A flush is triggered manually.

    :note: You should not need to instantiate, or even interact with, this
        yourself. It is intended to be wrapped by ``lumberjack.Lumberjack``.
        If you do, for some reason, use this yourself, it is a subclass of
        ``threading.Thread``, so you should call its ``start()`` method after
        initialisation.

    :param elasticsearch: The ``elasticsearch.Elasticsearch`` object on which
        to perform the bulk indexing.

    :param config: The Lumberjack config. See the Configuration section in the
        docs for details.
    """
    def __init__(self, elasticsearch, config):
        """Init method. See class docstring."""
        super(ActionQueue, self).__init__()
        self.elasticsearch = elasticsearch
        self.config = config
        self.queue = []
        self._flush_event = Event()
        self.queue_lock = Lock()
        self.exceptions = []
        self.running = True
        self.logger = logging.getLogger(__name__)
        self.daemon = True
        # So we can monkey-patch these in testing
        self._bulk = bulk
        self._open = open
    @property
    def last_exception(self):
        """The last exception raised in the ActionQueue thread, or None."""
        if len(self.exceptions) == 0:
            return None
        else:
            return self.exceptions[-1]
    def _run_postprocessors(self, queue_item):
        """Apply each postprocessor to a copy of the action's document.

        A postprocessor that raises is logged and its result discarded; the
        document keeps the value from the last successful postprocessor.
        """
        action, postprocessors = queue_item
        for postprocessor in postprocessors:
            try:
                action['_source'] = postprocessor(deepcopy(action['_source']))
            except Exception:
                self.logger.error('Postprocessor %s raised an exception.' %
                                  repr(postprocessor), exc_info=True)
        return action
    def _flush(self):
        """Perform all actions in the queue.

        Uses elasticsearch.helpers.bulk, and empties the queue on success.
        Uses the ``self.queue_lock`` to prevent a race condition.  If the
        bulk request fails with a TransportError, the actions are appended
        as JSON lines to the configured fallback log file instead.
        """
        with self.queue_lock:
            queue = list(self.queue)
            self.queue = []
        # Materialise the actions into a list: under Python 3 a bare map()
        # returns a one-shot iterator, which would break len(actions) below
        # and the second iteration in the fallback-log path.
        actions = [self._run_postprocessors(item) for item in queue]
        try:
            self._bulk(self.elasticsearch, actions)
        except TransportError:
            self.logger.error('Error in flushing queue. Falling back to file.',
                              exc_info=True)
            try:
                with self._open(self.config['fallback_log_file'],
                                'a') as log_file:
                    json_lines = map(lambda doc: dumps(doc) + '\n', actions)
                    for line in json_lines:
                        log_file.write(line)
            except IOError:
                self.logger.error('Error in fallback log. Lost %d logs.',
                                  len(actions), exc_info=True)
        else:
            self.logger.debug('Flushed %d logs into Elasticsearch.',
                              len(actions))
    def run(self):
        """The main method for the ActionQueue thread.

        Called by the ``start()`` method. Not to be called directly.
        """
        while (self.running or len(self.queue) > 0):
            try:
                self._flush()
            except Exception as exc:
                self.logger.error(
                    'Unexpected exception in actions thread. ' +
                    'Continuing anyway.',
                    exc_info=True)
                self.exceptions.append(exc)
            finally:
                self._flush_event.clear()
            interval = self.config['interval']
            try:
                triggered = self._flush_event.wait(interval)
            # Catch a weird bug in Python threading. See tests.
            except TypeError:
                self.logger.debug('Caught TypeError from Event.wait(). ' +
                                  'This is expected only during ' +
                                  'interpreter shutdown.', exc_info=True)
                return
            if triggered:
                self.logger.debug('Flushing on external trigger.')
            else:
                self.logger.debug(
                    'Flushing after timeout of %.1fs.', interval)
    # These two methods to be called externally, i.e. from the main thread.
    # TODO: Consider refactoring.
    def trigger_flush(self):
        """Manually trigger a flush of the queue.

        This is to be called from the main thread, and fires an interrupt in
        the timeout of the main loop.  As such it is not guaranteed to
        immediately trigger a flush, only to skip the countdown to the next
        one.  This means the flush will happen the next time this thread gets
        switched to by the Python interpreter.
        """
        self.logger.debug('Flush triggered; setting event object.')
        self._flush_event.set()
    def queue_index(self, suffix, doc_type, body, postprocessors=None):
        """Queue a new document to be added to Elasticsearch.

        If the queue becomes longer than self.config['max_queue_length'] then
        a flush is automatically triggered.

        :param suffix: The suffix of the index into which we should index the
            document.

        :param doc_type: The Elasticsearch type of the document to be indexed.
            Usually this should correspond to a registered schema in
            Lumberjack.

        :param body: The actual document contents, as a dict.

        :param postprocessors: Any post-processing functions to be run on the
            document before indexing.
        """
        postprocessors = postprocessors if postprocessors is not None else []
        action = {
            '_op_type': 'index',
            '_index': self.config['index_prefix'] + suffix,
            '_type': doc_type,
            '_source': body
        }
        with self.queue_lock:
            self.queue.append((action, postprocessors))
        self.logger.debug(
            'Put an action in the queue. qlen = %d, doc_type = %s',
            len(self.queue), doc_type)
        # TODO: do default schema
        if self.config['max_queue_length'] is not None and \
                len(self.queue) >= self.config['max_queue_length']:
            self.logger.debug('Hit max_queue_length.')
            self.trigger_flush()
|
catmaid/CATMAID | django/applications/catmaid/tests/apis/test_messages.py | Python | gpl-3.0 | 3,008 | 0.001662 | # -*- coding: utf-8 -*-
import json
from catmaid.models import Message
from .common import CatmaidApiTestCase
class MessagesApiTests(CatmaidApiTestCase):
    """Tests for the message mark-read and list HTTP endpoints."""

    def test_read_message_error(self):
        """Marking an unknown message as read returns a 404 error payload."""
        self.fake_authentication()
        message_id = 5050
        response = self.client.post(f'/messages/{message_id}/mark_read')
        self.assertEqual(response.status_code, 404)
        parsed_response = json.loads(response.content.decode('utf-8'))
        self.assertIn('error', parsed_response)
        self.assertIn('type', parsed_response)
        # assertEqual, not the deprecated assertEquals alias (removed in 3.12).
        self.assertEqual('Http404', parsed_response['type'])

    def test_read_message_without_action(self):
        """A message with no action URL is marked read and reports success."""
        self.fake_authentication()
        message_id = 3
        response = self.client.post(f'/messages/{message_id}/mark_read')
        self.assertStatus(response)
        parsed_response = json.loads(response.content.decode('utf-8'))
        message = Message.objects.get(id=message_id)
        self.assertEqual(True, message.read)
        self.assertTrue(parsed_response.get('success'))

    def test_read_message_with_action(self):
        """A message with an action URL redirects (302) after being marked read."""
        self.fake_authentication()
        message_id = 1
        response = self.client.post(f'/messages/{message_id}/mark_read')
        self.assertEqual(response.status_code, 302)
        message = Message.objects.filter(id=message_id)[0]
        self.assertEqual(True, message.read)

    def test_list_messages(self):
        """Listing returns every fixture message plus the notification stub."""
        self.fake_authentication()
        response = self.client.post(
                '/messages/list', {})
        self.assertStatus(response)
        parsed_response = json.loads(response.content.decode('utf-8'))

        def get_message(data, id):
            msgs = [d for d in data if d['id'] == id]
            if len(msgs) != 1:
                raise ValueError("Malformed message data")
            return msgs[0]

        expected_result = {
            '0': {
                'action': '',
                'id': 3,
                'text': 'Contents of message 3.',
                'time': '2014-10-05 11:12:01.360422+00:00',
                'title': 'Message 3'
            },
            '1': {
                'action': 'http://www.example.com/message2',
                'id': 2,
                'text': 'Contents of message 2.',
                'time': '2011-12-20 16:46:01.360422+00:00',
                'title': 'Message 2'
            },
            '2': {
                'action': 'http://www.example.com/message1',
                'id': 1,
                'text': 'Contents of message 1.',
                'time': '2011-12-19 16:46:01+00:00',
                'title': 'Message 1'
            },
            '3': {
                'id': -1,
                'notification_count': 0
            }
        }
        # Check result independent from order
        for mi in ('0', '1', '2', '3'):
            self.assertEqual(expected_result[mi], parsed_response[mi])
|
canvasnetworks/canvas | website/drawquest/apps/whitelisting/tests.py | Python | bsd-3-clause | 2,635 | 0.004934 | import time
from django.conf import | settings
from drawquest.tests.tests_helpers import (CanvasTestCase, create_content, create_user, | create_group,
create_comment, create_staff, create_quest, create_quest_comment,
create_current_quest)
from canvas.redis_models import redis
from services import Services, override_service
from drawquest.apps.whitelisting.models import allow, deny, moderate
class TestWhitelisting(CanvasTestCase):
    """Checks that staff whitelisting controls which quest comments
    appear in the public gallery."""

    def after_setUp(self):
        self.quest = create_current_quest()
        self.content = create_content()
        self._enable()
        settings.CACHE_KEY_PREFIX = 'DQv' + str(int(settings.CACHE_KEY_PREFIX[-1]) + 1)

    def _enable(self):
        self.api_post('/api/whitelisting/enable', {}, user=create_staff())

    def _disable(self):
        self.api_post('/api/whitelisting/disable', {}, user=create_staff())

    def _gallery(self):
        return self.api_post('/api/quests/comments', {'quest_id': self.quest.id})['comments']

    def _gallery_ids(self):
        # Ids as strings, matching how the API serializes them.
        return set(str(entry['id']) for entry in self._gallery())

    def _assert_not_in_gallery(self, comment):
        self.assertFalse(str(comment.id) in self._gallery_ids())

    def _assert_in_gallery(self, comment):
        self.assertTrue(str(comment.id) in self._gallery_ids())

    def test_unjudged(self):
        drawing = create_quest_comment(self.quest)
        self._assert_not_in_gallery(drawing)

    def test_allow(self):
        drawing = create_quest_comment(self.quest)
        self.api_post('/api/whitelisting/allow', {'comment_id': drawing.id}, user=create_staff())
        self._assert_in_gallery(drawing)

    def test_deny(self):
        drawing = create_quest_comment(self.quest)
        self.api_post('/api/whitelisting/deny', {'comment_id': drawing.id}, user=create_staff())
        self._assert_not_in_gallery(drawing)

    def test_rejudge(self):
        drawing = create_quest_comment(self.quest)
        self.api_post('/api/whitelisting/deny', {'comment_id': drawing.id}, user=create_staff())
        self._assert_not_in_gallery(drawing)
        self.api_post('/api/whitelisting/allow', {'comment_id': drawing.id}, user=create_staff())
        self._assert_in_gallery(drawing)

    def test_disable(self):
        drawing = create_quest_comment(self.quest)
        self._assert_not_in_gallery(drawing)
        self._disable()
        self._assert_in_gallery(drawing)

    def test_reenable(self):
        drawing = create_quest_comment(self.quest)
        self._assert_not_in_gallery(drawing)
        self._disable()
        self._assert_in_gallery(drawing)
        self._enable()
        self._assert_not_in_gallery(drawing)
|
ilogue/niprov | niprov/formatfactory.py | Python | bsd-3-clause | 1,093 | 0 | from niprov.dependencies import Dependencies
from niprov.formatjson import JsonFormat
from niprov.formatxml import XmlFormat
from niprov.formatnarrated import NarratedFormat
from niprov.formatsimple import SimpleFormat
from niprov.formatdict import DictFormat
from niprov.formatobject import ObjectFormat
from niprov.pictu | res import Pict | ureCache
class FormatFactory(object):
    """Creates provenance serialization/rendering format objects by name."""

    def __init__(self, dependencies=Dependencies()):
        self.dependencies = dependencies

    def create(self, formatName):
        """Return a fresh format object for *formatName*.

        Formats that need runtime services are constructed with
        self.dependencies.  Raises ValueError for an unrecognized name.
        """
        # Zero-argument builders; lambdas keep construction lazy so only
        # the requested format is instantiated.
        builders = {
            'json': lambda: JsonFormat(self.dependencies),
            'xml': lambda: XmlFormat(self.dependencies),
            'narrated': NarratedFormat,
            'simple': SimpleFormat,
            'dict': DictFormat,
            'object': ObjectFormat,
            'picture': lambda: PictureCache(self.dependencies),
        }
        if formatName not in builders:
            raise ValueError('Unknown format: ' + str(formatName))
        return builders[formatName]()
|
mpaulweeks/exploitable-comic | py/current.py | Python | mit | 883 | 0 |
import os
import requests
import shutil
from bs4 import BeautifulSoup
from datetime import datetime
from .s3 import upload_file
def current():
    """Fetch today's comic from intelligentlifecomics.com and archive it to S3.

    Scrapes the front page for the comic date and image URL, downloads the
    image to a temp file named after the ISO date, uploads it with
    upload_file(), then deletes the local copy.  Returns silently if the
    front page cannot be fetched.
    """
    url = 'http://intelligentlifecomics.com'
    req = requests.get(url)
    if req.status_code != 200:
        return
    soup = BeautifulSoup(req.text, 'html.parser')
    date_str = soup.find(id="comic-date").get_text().strip()
    date_object = datetime.strptime(date_str, '%B %d, %Y')
    date_fmt = date_object.strftime('%Y-%m-%d')
    file_path = "temp/%s.gif" % date_fmt
    img_url = soup.find(id='comicpanel').find('img').get('src')
    r = requests.get(img_url, stream=True)
    if r.status_code == 200:
        with open(file_path, 'wb') as f:
            # decode_content makes urllib3 undo any gzip/deflate transfer
            # encoding before we stream the raw bytes to disk.
            r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)
        # Upload/remove only on success; previously these ran unconditionally
        # and raised FileNotFoundError when the image download failed.
        upload_file(file_path)
        os.remove(file_path)
os.remove(file_path)
# Allow running this module directly as a one-shot archiving script.
if __name__ == "__main__":
    current()
|
fastinetserver/portage-idfetch | pym/portage/_selinux.py | Python | gpl-2.0 | 3,723 | 0.028472 | # Copyright 1999-2009 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
# Don't use the unicode-wrapped os and shutil modules here since
# the whole _selinux module itself will be wrapped.
import os
import shutil
import portage
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
from portage.localization import _
import selinux
from selinux import is_selinux_enabled
def copyfile(src, dest):
	"""Copy src to dest, labeling dest with src's SELinux context."""
	src = _unicode_encode(src, encoding=_encodings['fs'], errors='strict')
	dest = _unicode_encode(dest, encoding=_encodings['fs'], errors='strict')
	status, ctx = selinux.lgetfilecon(src)
	if status < 0:
		src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
		raise OSError(_("copyfile: Failed getting context of \"%s\".") % src)
	# Files created while the copy runs inherit src's context; the default
	# fs-create context is always restored afterwards.
	setfscreate(ctx)
	try:
		shutil.copyfile(src, dest)
	finally:
		setfscreate()
def getcontext():
	"""Return the SELinux context of the current process."""
	status, context = selinux.getcon()
	if status < 0:
		raise OSError(_("getcontext: Failed getting current process context."))
	return context
def mkdir(target, refdir):
	"""Create directory target labeled with the SELinux context of refdir."""
	target = _unicode_encode(target, encoding=_encodings['fs'], errors='strict')
	refdir = _unicode_encode(refdir, encoding=_encodings['fs'], errors='strict')
	status, ctx = selinux.getfilecon(refdir)
	if status < 0:
		refdir = _unicode_decode(refdir, encoding=_encodings['fs'],
			errors='replace')
		raise OSError(
			_("mkdir: Failed getting context of reference directory \"%s\".") \
			% refdir)
	# Create under refdir's context, then always reset the fs-create context.
	setfscreate(ctx)
	try:
		os.mkdir(target)
	finally:
		setfscreate()
def rename(src, dest):
	"""Rename src to dest while preserving src's SELinux context.

	The context is installed as the fs-create context for the duration of
	the rename and always reset afterwards.

	@raise OSError: if the context of src cannot be read
	"""
	src = _unicode_encode(src, encoding=_encodings['fs'], errors='strict')
	dest = _unicode_encode(dest, encoding=_encodings['fs'], errors='strict')
	(rc, ctx) = selinux.lgetfilecon(src)
	if rc < 0:
		src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
		raise OSError(_("rename: Failed getting context of \"%s\".") % src)
	setfscreate(ctx)
	try:
		os.rename(src, dest)
	finally:
		setfscreate()
def settype(newtype):
	"""Return the current process context with its type field set to newtype."""
	fields = getcontext().split(":")
	fields[2] = newtype
	return ":".join(fields)
def setexec(ctx="\n"):
	"""Set the SELinux exec() context, warning instead of raising when
	SELinux is not in enforcing mode."""
	ctx = _unicode_encode(ctx, encoding=_encodings['content'], errors='strict')
	if selinux.setexeccon(ctx) >= 0:
		return
	ctx = _unicode_decode(ctx, encoding=_encodings['content'],
		errors='replace')
	if selinux.security_getenforce() == 1:
		raise OSError(_("Failed setting exec() context \"%s\".") % ctx)
	# Permissive mode: report the failure without aborting.
	portage.writemsg("!!! " + \
		_("Failed setting exec() context \"%s\".") % ctx, \
		noiselevel=-1)
def setfscreate(ctx="\n"):
	"""Set the SELinux fs-create context; the default resets it."""
	ctx = _unicode_encode(ctx,
		encoding=_encodings['content'], errors='strict')
	if selinux.setfscreatecon(ctx) >= 0:
		return
	ctx = _unicode_decode(ctx,
		encoding=_encodings['content'], errors='replace')
	raise OSError(
		_("setfscreate: Failed setting fs create context \"%s\".") % ctx)
def spawn_wrapper(spawn_func, selinux_type):
	"""Return a wrapper around spawn_func that runs it under selinux_type.

	The exec() context is switched to the given type before the call and
	always restored afterwards.
	"""
	selinux_type = _unicode_encode(selinux_type,
		encoding=_encodings['content'], errors='strict')
	def wrapper_func(*args, **kwargs):
		setexec(settype(selinux_type))
		try:
			return spawn_func(*args, **kwargs)
		finally:
			setexec()
	return wrapper_func
def symlink(target, link, reflnk):
	"""Create symlink link -> target labeled with reflnk's SELinux context."""
	target = _unicode_encode(target, encoding=_encodings['fs'], errors='strict')
	link = _unicode_encode(link, encoding=_encodings['fs'], errors='strict')
	reflnk = _unicode_encode(reflnk, encoding=_encodings['fs'], errors='strict')
	status, ctx = selinux.lgetfilecon(reflnk)
	if status < 0:
		reflnk = _unicode_decode(reflnk, encoding=_encodings['fs'],
			errors='replace')
		raise OSError(
			_("symlink: Failed getting context of reference symlink \"%s\".") \
			% reflnk)
	# Create under the reference symlink's context, then reset it.
	setfscreate(ctx)
	try:
		os.symlink(target, link)
	finally:
		setfscreate()
|
devurandom/portage | pym/portage/_sets/dbapi.py | Python | gpl-2.0 | 14,306 | 0.029568 | # Copyright 2007-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import time
from portage import os
from portage.versions import best, catsplit, vercmp
from portage.dep import Atom
from portage.localization import _
from portage._sets.base import PackageSet
from portage._sets import SetConfigError, get_boolean
import portage
__all__ = ["CategorySet", "DowngradeSet",
"EverythingSet", "OwnerSet", "VariableSet"]
class EverythingSet(PackageSet):
	"""Package set of cp:SLOT atoms matching every installed package."""
	_operations = ["merge"]
	description = "Package set which contains SLOT " + \
		"atoms to match all installed packages"
	# Optional predicate (overridden by subclasses) deciding which atoms to keep.
	_filter = None
	def __init__(self, vdbapi, **kwargs):
		super(EverythingSet, self).__init__()
		self._db = vdbapi
	def load(self):
		"""Collect one cp:SLOT atom per installed package version."""
		atoms = []
		aux_get = self._db.aux_get
		cp_list = self._db.cp_list
		for cp in self._db.cp_all():
			for cpv in cp_list(cp):
				# NOTE: Create SLOT atoms even when there is only one
				# SLOT installed, in order to avoid the possibility
				# of unwanted upgrades as reported in bug #338959.
				slot, = aux_get(cpv, ["SLOT"])
				atom = Atom("%s:%s" % (cp, slot))
				if not self._filter or self._filter(atom):
					atoms.append(atom)
		self._setAtoms(atoms)
	@classmethod
	def singleBuilder(cls, options, settings, trees):
		return EverythingSet(trees["vartree"].dbapi)
class OwnerSet(PackageSet):
	"""Package set built from the installed packages owning given files."""
	_operations = ["merge", "unmerge"]
	description = "Package set which contains all packages " + \
		"that own one or more files."
	def __init__(self, vardb=None, exclude_files=None, files=None):
		super(OwnerSet, self).__init__()
		self._db = vardb
		self._exclude_files = exclude_files
		self._files = files
	def mapPathsToAtoms(self, paths, exclude_paths=None):
		"""
		Return "cp:slot" atoms for the owners of the given paths, minus the
		atoms owning any of exclude_paths.  All paths must have $EROOT
		stripped from the left side.
		"""
		owners = set()
		aux_get = self._db.aux_get
		aux_keys = ["SLOT"]
		if exclude_paths is None:
			for link, p in self._db._owners.iter_owners(paths):
				slot, = aux_get(link.mycpv, aux_keys)
				owners.add("%s:%s" % (link.mycpv.cp, slot))
			return owners
		# Resolve owners of wanted and excluded paths in one pass, then
		# subtract every atom that owns an excluded path.
		all_paths = set(paths) | set(exclude_paths)
		excluded_atoms = set()
		for link, p in self._db._owners.iter_owners(all_paths):
			slot, = aux_get(link.mycpv, aux_keys)
			atom = "%s:%s" % (link.mycpv.cp, slot)
			owners.add(atom)
			if p in exclude_paths:
				excluded_atoms.add(atom)
		return owners - excluded_atoms
	def load(self):
		self._setAtoms(self.mapPathsToAtoms(self._files,
			exclude_paths=self._exclude_files))
	@classmethod
	def singleBuilder(cls, options, settings, trees):
		if "files" not in options:
			raise SetConfigError(_("no files given"))
		exclude_files = options.get("exclude-files")
		if exclude_files is not None:
			exclude_files = frozenset(portage.util.shlex_split(exclude_files))
		return cls(vardb=trees["vartree"].dbapi, exclude_files=exclude_files,
			files=frozenset(portage.util.shlex_split(options["files"])))
class VariableSet(EverythingSet):
	"""Installed packages filtered by values of a metadata variable."""
	_operations = ["merge", "unmerge"]
	description = "Package set which contains all packages " + \
		"that match specified values of a specified variable."
	def __init__(self, vardb, metadatadb=None, variable=None, includes=None, excludes=None):
		super(VariableSet, self).__init__(vardb)
		self._metadatadb = metadatadb
		self._variable = variable
		self._includes = includes
		self._excludes = excludes
	def _filter(self, atom):
		"""Accept the atom when the variable's values pass the include and
		exclude checks against the best matching ebuild."""
		ebuild = best(self._metadatadb.match(atom))
		if not ebuild:
			return False
		raw_value, = self._metadatadb.aux_get(ebuild, [self._variable])
		words = raw_value.split()
		if self._includes and not self._includes.intersection(words):
			return False
		return not (self._excludes and self._excludes.intersection(words))
	@classmethod
	def singleBuilder(cls, options, settings, trees):
		variable = options.get("variable")
		if variable is None:
			raise SetConfigError(_("missing required attribute: 'variable'"))
		includes = options.get("includes", "")
		excludes = options.get("excludes", "")
		if not (includes or excludes):
			raise SetConfigError(_("no includes or excludes given"))
		metadatadb = options.get("metadata-source", "vartree")
		if metadatadb not in trees:
			raise SetConfigError(_("invalid value '%s' for option metadata-source") % metadatadb)
		return cls(trees["vartree"].dbapi,
			metadatadb=trees[metadatadb].dbapi,
			excludes=frozenset(excludes.split()),
			includes=frozenset(includes.split()),
			variable=variable)
class DowngradeSet(PackageSet):
	"""Installed packages whose best visible ebuild is older than installed."""
	_operations = ["merge", "unmerge"]
	description = "Package set which contains all packages " + \
		"for which the highest visible ebuild version is lower than " + \
		"the currently installed version."
	def __init__(self, portdb=None, vardb=None):
		super(DowngradeSet, self).__init__()
		self._portdb = portdb
		self._vardb = vardb
	def load(self):
		"""Find cp:slot atoms where the installed version exceeds the best
		visible ebuild version."""
		atoms = []
		xmatch = self._portdb.xmatch
		cp_list = self._vardb.cp_list
		aux_get = self._vardb.aux_get
		for cp in self._vardb.cp_all():
			for cpv in cp_list(cp):
				slot, = aux_get(cpv, ["SLOT"])
				slot_atom = "%s:%s" % (cp, slot)
				ebuild = xmatch("bestmatch-visible", slot_atom)
				if ebuild and vercmp(cpv.version, ebuild.version) > 0:
					atoms.append(slot_atom)
		self._setAtoms(atoms)
	@classmethod
	def singleBuilder(cls, options, settings, trees):
		return cls(portdb=trees["porttree"].dbapi,
			vardb=trees["vartree"].dbapi)
class UnavailableSet(EverythingSet):
	"""Installed packages with no visible ebuild for the same cp:slot."""
	_operations = ["unmerge"]
	description = "Package set which contains all installed " + \
		"packages for which there are no visible ebuilds " + \
		"corresponding to the same $CATEGORY/$PN:$SLOT."
	def __init__(self, vardb, metadatadb=None):
		super(UnavailableSet, self).__init__(vardb)
		self._metadatadb = metadatadb
	def _filter(self, atom):
		# Keep the atom only when the metadata db offers no match at all.
		return not self._metadatadb.match(atom)
	@classmethod
	def singleBuilder(cls, options, settings, trees):
		metadatadb = options.get("metadata-source", "porttree")
		if metadatadb not in trees:
			raise SetConfigError(_("invalid value '%s' for option "
				"metadata-source") % (metadatadb,))
		return cls(trees["vartree"].dbapi,
			metadatadb=trees[metadatadb].dbapi)
class UnavailableBinaries(EverythingSet):
	"""Installed packages with no corresponding binary package available."""
	# List form for consistency with the other set classes in this module.
	_operations = ["merge", "unmerge"]
	description = "Package set which contains all installed " + \
		"packages for which corresponding binary packages " + \
		"are not available."
	def __init__(self, vardb, metadatadb=None):
		super(UnavailableBinaries, self).__init__(vardb)
		self._metadatadb = metadatadb
	def _filter(self, atom):
		inst_pkg = self._db.match(atom)
		if not inst_pkg:
			return False
		inst_cpv = inst_pkg[0]
		# Keep the atom when the binary db has no entry for the installed cpv.
		return not self._metadatadb.cpv_exists(inst_cpv)
	@classmethod
	def singleBuilder(cls, options, settings, trees):
		metadatadb = options.get("metadata-source", "bintree")
		if not metadatadb in trees:
			raise SetConfigError(_("invalid value '%s' for option "
				"metadata-source") % (metadatadb,))
		return cls(trees["vartree"].dbapi,
			metadatadb=trees[metadatadb].dbapi)
class CategorySet(PackageSet):
_operations = ["merge", "unmerge"]
def __init__(self, category, dbapi, only_visible=True):
super(CategorySet, self).__init__()
self._db = dbapi
self._category = category
self._check = only_visible
if only_visible:
s="visible"
else:
s="all"
self.description = "Package set containing %s packages of category %s" % (s, self._category)
def load(self):
myatoms = []
for cp in self._db.cp_all():
if catsplit(cp)[0] == self._category:
if (not self._check) or len(self._db.match(cp)) > 0:
myatoms.append(cp)
self._setAtoms(myatoms)
def _builderGetRepository(cls, options, repositories):
repository = options.get("repository", "porttree")
if not repository in repositories:
raise SetConfigError(_ |
coder006/BinPy | BinPy/tools/powersource.py | Python | bsd-3-clause | 1,962 | 0.005097 | from BinPy.Gates import *
class PowerSource:
    """
    Models a Power Source from which various connectors can tap by connecting to it.

    taps: The list of all connectors connected to this power source.
    connect(): Takes in one or more connectors as input and connects them to the power source.
    disconnect(): Takes in one or more connectors as input and disconnects them from the power source.
    """

    def __init__(self):
        # Connectors currently drawing from this source.
        self.taps = []

    def connect(self, *connectors):
        """Takes in one or more connectors as an input and taps to the power source."""
        for connector in connectors:
            if not isinstance(connector, Connector):
                raise Exception("Error: Input given is not a connector")
            if len(connector.connections['output']) != 0:
                raise Exception(
                    "ERROR: The connector is already an output of some other object")
            self.taps.append(connector)
            # Drive the connector high and register this source as its output.
            connector.state = 1
            connector.tap(self, 'output')
            connector.trigger()

    def disconnect(self, *connectors):
        """
        Takes in one or more connectors as an input and disconnects them from the power source.

        A floating connector has a value of None.
        A message is printed if a specified connector is not already tapping from this source.
        """
        for connector in connectors:
            if not isinstance(connector, Connector):
                raise Exception("Error: Input given is not a connector")
            try:
                self.taps.remove(connector)
                connector.state = None
                connector.connections['output'].remove(self)
                connector.trigger()
            # Narrowed from a bare except: list.remove() raises ValueError and
            # the dict lookup raises KeyError when the connector is not tapped.
            except (ValueError, KeyError):
                print (
                    "The specified connector is not tapped to this power source")
|
jobovy/apogee-maps | py/get_agetables.py | Python | bsd-3-clause | 3,237 | 0.0173 | import os, os.path
import pexpect
import subprocess
from astropy.io import fits
import numpy as np
from optparse import OptionParser
import sys
import tempfile
from ftplib import FTP
import shutil
_ERASESTR= " "
def get_agetables(args,options):
    """Download the Vizier age tables (catalog J/MNRAS/456/3655) into the
    output directory args[0] and convert both ASCII tables to FITS.

    `options` is currently unused here; the --convert flag presumably is
    meant to gate the FITS conversion -- TODO confirm.
    """
    # Vizier catalog identifier and the table/output file names.
    cat = 'J/MNRAS/456/3655/'
    tab1name = 'table1.dat'
    tab2name = 'table2.dat.gz'
    table2out = 'DR12_martigages_vizier.fits'
    table1out = 'martig_table1.fits'
    out = args[0]
    _download_file_vizier(cat, out, catalogname=tab1name)
    _download_file_vizier(cat, out, catalogname=tab2name)
    # table2 ships gzipped; decompress in place so genfromtxt can read it.
    subprocess.call(['gunzip', os.path.join(out,tab2name)])
    ztab2name = 'table2.dat'
    # Column names used to label the fields of the two ASCII tables.
    tab1_colnames = '2MASS_ID, Teff, Logg, [M/H], [C/M], [N/M], Massin, e_Massin, Massout, Age_in, e_Agein, E_Agein, Age_out'
    tab2_colnames = '2MASS_ID, Teff, Logg, [M/H], [C/M], [N/M], Massout, Age'
    tab1 = np.genfromtxt(os.path.join(out,tab1name), dtype=None, names=tab1_colnames)
    tab2 = np.genfromtxt(os.path.join(out,ztab2name), dtype=None, names=tab2_colnames)
    # Wrap the structured arrays as FITS binary tables and write them out.
    hdu1 = fits.BinTableHDU.from_columns(tab1)
    hdu2 = fits.BinTableHDU.from_columns(tab2)
    hdu1.writeto(os.path.join(out,table1out))
    hdu2.writeto(os.path.join(out,table2out))
def _download_file_vizier(cat, filePath, catalogname='catalog.dat'):
    '''
    Fetch `catalogname` for Vizier catalog `cat` via FTP into the
    directory `filePath`.  (Adapted from Jo Bovy's gaia_tools package.)

    Downloads into a temporary file first and moves it into place only on
    success, so a failed transfer never leaves a partial catalog behind;
    the temporary file is always cleaned up.  Any ftplib/IO error from the
    transfer propagates to the caller.

    The original retry loop was non-functional: its `except: raise` made
    everything after it unreachable and it referenced an undefined
    _MAX_NTRIES, so the dead machinery has been removed.
    '''
    sys.stdout.write('\r'+"Downloading file %s ...\r" \
                         % (os.path.basename(filePath)))
    sys.stdout.flush()
    try:
        # make all intermediate directories
        os.makedirs(os.path.dirname(filePath))
    except OSError:
        # Directories already exist.
        pass
    # Safe way of downloading: write to a temp file, move on success.
    # (fd renamed from `file`, which shadowed the builtin.)
    fd, tmp_savefilename = tempfile.mkstemp()
    os.close(fd)  # Easier this way
    try:
        ftp = FTP('cdsarc.u-strasbg.fr')
        ftp.login('anonymous', 'test')
        ftp.cwd(os.path.join('pub', 'cats', cat))
        with open(tmp_savefilename, 'wb') as savefile:
            ftp.retrbinary('RETR %s' % catalogname, savefile.write)
        shutil.move(tmp_savefilename, os.path.join(filePath, catalogname))
    finally:
        if os.path.exists(tmp_savefilename):
            os.remove(tmp_savefilename)
    sys.stdout.write('\r'+_ERASESTR+'\r')
    sys.stdout.flush()
    return None
def get_options():
    """Build and return the command-line option parser for this script."""
    parser = OptionParser(usage="usage: %prog [options] <outpath>")
    # Whether to convert the downloaded age tables to FITS.
    parser.add_option("--convert", dest='convert', default=True,
                      action='store_true', help="convert to fits?")
    return parser
# Script entry point: parse CLI arguments, then download/convert the tables.
if __name__ == '__main__':
    parser = get_options()
    options, args= parser.parse_args()
    get_agetables(args,options)
|
Lucas-C/pre-commit | pre_commit/languages/script.py | Python | mit | 444 | 0 | from __future__ import unicode_literals
from pre_commit.languages import helpers
from pre_commit.xargs import xargs
# Script hooks need no managed environment: nothing is installed and files
# are executed straight from the hook repository checkout (see run_hook).
ENVIRONMENT_DIR = None
get_default_version = helpers.basic_get_default_version
healthy = helpers.basic_healthy
install_environment = helpers.no_install
def run_hook(repo_cmd_runner, hook, file_args):
    """Run a script hook, passing the filenames via xargs batching."""
    cmd = helpers.to_cmd(hook)
    # Resolve the script path relative to the hook repository checkout.
    cmd = (repo_cmd_runner.prefix_dir + cmd[0],) + cmd[1:]
    return xargs(cmd, file_args)
| |
justinwp/croplands | migrations/versions/4986e64643f4_.py | Python | mit | 1,023 | 0.01173 | """empty message
Revision ID: 4986e64643f4
Revises: 175003d01257
Create Date: 2015-04-15 12:16:41.965765
"""
# revision identifiers, used by Alembic.
revision = '4986e64643f4'
down_revision = '175003d01257'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('tile', sa.Column('featu | re_id', sa.String(), nullable=False))
op.alter_column('tile', 'date_acquired',
existing_type=postgresql | .TIMESTAMP(),
nullable=False)
op.create_unique_constraint(None, 'tile', ['feature_id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'tile', type_='unique')
op.alter_column('tile', 'date_acquired',
existing_type=postgresql.TIMESTAMP(),
nullable=True)
op.drop_column('tile', 'feature_id')
### end Alembic commands ###
|
khosrow/metpx | columbo/lib/cs/missingFile.py | Python | gpl-2.0 | 1,981 | 0.007067 | #!/usr/bin/python2
#!/software/pub/python-2.3.3/bin/python
"""
MetPX Copyright (C) 2004-2006 Environment Canada
MetPX comes with ABSOLUTELY NO WARRANTY; For details type see the file
named COPYING in the root of the source directory tree.
"""
#############################################################################################
# Name: missingFile.py
#
# Author: Daniel Lemay
#
# Description: Display this page when a file is missing
#
# Date: 2006-09-27
#
#############################################################################################
import cgi
import cgitb; cgitb.enable()
import sys, os, pwd, time, re, pickle, commands
sys.path.append(sys.path[0] + "/../../lib");
sys.path.append("../../lib")
from PDSPath import *
from ColumboPaths import *
import template
form = cgi.FieldStorage()
filename = form["filename"].value
image = int(form["image"].value)
keys = os.environ.keys()
keys.sort()
print "Content-Type: text/html"
print
print """<html>
<head>
<meta name="Author" content="Daniel Lemay">
<meta name="Description" content="Error Message">
<meta name="Keywords" content="">
<title>Missing File</title>
<link rel="stylesheet" type="text/css" href="/css/style.css">
<style>
</style>
<script type="text/javascript">
</scrip | t>
</head>
<body text="#000000" bgcolor="#3b87a9" link="#00ff00" vlink="ff00ff" >
<center>
"""
if image:
template.printMainImageCenter()
#print "<br>"
#for key in keys:
# print "%s = %s <br>" % (key, os.environ[key])
print """
<table width="100%%" border="0" cell | padding="0" cellspacing="0">
<tr>
<td valign="top" align="center" bgcolor="#cccccc">
<br>
</td>
</tr>
<tr>
<td valign="top" align="center" bgcolor="#cccccc">
<blockquote>
<font size="6"><b><pre>Missing File (%s)</pre></b></font>
<font size="7" color="red"><b><pre>Contact DADS Pager</pre></b></font>
</blockquote>
</td>
</tr>
</table>
</center>
</body>
</html>
""" % (filename)
|
mandshaw/lethe | lethe/rollardex.py | Python | mit | 2,314 | 0.002593 | import os
import csv
from datetime import datetime
lethe_dir = os.path.dirname(__file__)
class PersonNotFoundError(BaseException):
pass
class Person(object):
"""
Class to store details of each person
"""
def __init__(self, name, email, dob, organiser, checksum):
"""
Constructor to create a person obj
:param name: Name
:param email: Email
:param dob: Date of birth in format DD/MM/YYYY
"""
self.name = name
self.email = email
self.dob = dob
self.organiser = organiser
self.checksum = checksum
@property
def dob(self):
return self._dob
@dob.setter
def dob(self, dob):
self._dob = datetime.strptime(dob, '%d/%m/%Y').date()
class RollarDex(object):
"""
Class to read in a csv file and Create a RollarDex of People
"""
def __init__(self, rollardex_source=None):
if rollardex_source is None:
rollardex_source = os.path.join(lethe_dir, 'birthdays.csv')
self.rollardex_source = rollardex_source
with open(rollardex_source) as rollardex_csv:
rollardex_reader = csv.reader(rollardex_csv)
self.contents = [Person(row[0], row[1], row[2], row[3], row[4]) for row in rollardex_reader]
def flip(self):
for content in self.contents:
yield content
def find(self, name):
for content in self.contents:
if content.name == name:
return content
raise PersonNotFoundError('Could not find {name} in RollarDex'.format(name=name))
def get_all_except(self, name):
for content in self.contents:
if content.name == name:
continue
else:
yield content
def update(self, birthday_boy, organiser, | checksum):
with open(self.rollardex_source, 'wb') as rollardex_csv:
rollardex_reader = csv.writer(rollardex_csv, delimiter=',')
birthday_boy = self.find(birthday_boy.name)
birthday_boy.organiser = organiser
birthday_boy.checksum = checksum
for person in self.contents:
rollardex_reader.writerow([person.name, person.email, person.dob.strftime('%d/%m/%Y'), person.organis | er, person.checksum])
|
Jumpscale/jumpscale_core8 | lib/JumpScale/tools/realitycapture/Monitor.py | Python | apache-2.0 | 3,119 | 0.000641 |
class Monitor:
def __init__(self, controller, redis_address):
self.controller = controller
self.redis_address = redis_address
def _schedule(self, gid, nid, domain, name, args={}, cron='@every 1m'):
"""
Schedule the given jumpscript (domain/name) to run on the specified node according to cron specs
:param gid: Grid id
:param nid: Node id
:param domain: jumpscript domain
:param name: jumpscript name
:param args: jumpscript arguments
:param cron: cron specs according to https://godoc.org/github.com/robfig/cron#hdr-CRON_Expression_Format
:return:
"""
args = args.update({'redis': self.redis_address})
#
# key = '.'.join([str(gid), str(nid), domain, name])
# self.controller.schedule()
def _unschedule(self, gid, nid, domain, name):
"""
Unschedule the given jumpscript
:param gid: Grid id
:param nid: Node id
:param domain: jumpscript domain
:param name: jumpscript name
:return:
"""
def disk(self, gid, nid, args={}, cron='@every 1m'):
"""
Schedule the disk monitor on the specified node.
:param gid: Grid id
:param nid | : Node id
:param args: ju | mpscript arguments
:param cron: cron specs
:return:
"""
pass
def virtual_machine(self, gid, nid, args={}, cron='@every 1m'):
"""
Schedule the vm monitor on the specified node.
:param gid: Grid id
:param nid: Node id
:param args: jumpscript arguments
:param cron: cron specs
:return:
"""
pass
def docker(self, gid, nid, args={}, cron='@every 1m'):
"""
Schedule the docker monitor on the specified node.
:param gid: Grid id
:param nid: Node id
:param args: jumpscript arguments
:param cron: cron specs
:return:
"""
pass
def nic(self, gid, nid, args={}, cron='@every 1m'):
"""
Schedule the nic monitor on the specified node.
:param gid: Grid id
:param nid: Node id
:param args: jumpscript arguments
:param cron: cron specs
:return:
"""
pass
def reality(self, gid, nid, args={}, cron='@every 10m'):
"""
Schedule the reality reader on the specified node. Reality jumpscript
will also use the aggregator client to report reality objects.
:param gid: Grid id
:param nid: Node id
:param args: jumpscript arguments
:param cron: cron specs
:return:
"""
pass
def logs(self, gid, nid, args={}, cron='@every 1m'):
"""
Schedule the logs reader on the specified node. Logs jumpscript
will collects logs from various sources and use the aggregator client
to report logs.
:param gid: Grid id
:param nid: Node id
:param args: jumpscript arguments
:param cron: cron specs
:return:
"""
pass
|
Haikson/sitemap-generator | pysitemap/db.py | Python | apache-2.0 | 852 | 0.002347 | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
from sqlalchemy.sql import ClauseElement
DB_URI = 'sqlite:///stuff.db'
db_endine = create_engine(DB_URI)
session = scoped_session(
sessionmaker(
autocommit=False,
autoflush=False,
bind=db_endine
)
)
Model = declarative_base()
def get_or_ | create(session, model, defaults=None, **kwargs):
instance = session.query(model).filter_by(**kwargs).first()
if instance:
return instance, False
else:
params = dict((k, v) for k, v in kwargs.items() if not isinstance(v, ClauseElement))
params.update(defaults or {})
instance = model(**params)
session.add(instance)
| session.commit()
return instance, True
|
zzzsochi/includer | setup.py | Python | bsd-3-clause | 659 | 0.001517 | from setuptools import setup
setup(name='includer',
| version='1.0.2',
description='Include and run callables',
classifiers=[
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX",
"Programming Language :: Python :: 3",
"Development Status :: 5 - Production/Stable",
],
author='Alexande | r Zelenyak',
author_email='zzz.sochi@gmail.com',
license='BSD',
url='https://github.com/zzzsochi/includer',
keywords=['include', 'configure', 'pyramid'],
py_modules=['includer'],
install_requires=['zope.dottedname'],
tests_require=['pytest'],
)
|
waylonflinn/asv | test/test_dev.py | Python | bsd-3-clause | 3,311 | 0.000302 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
import re
from os.path import abspath, dirname, join
import shutil
import pytest
import six
from asv import config
from asv.commands.dev import Dev
from asv.commands.profiling import Profile
from asv.commands.run import Run
from asv.commands import make_argparser
from . import tools
@pytest.fixture
def basic_conf(tmpdir):
tmpdir = six.text_type(tmpdir)
local = abspath(dirname(__file__))
os.chdir(tmpdir)
shutil.copyfile(join(local, 'asv-machine.json'),
join(tmpdir, 'asv-machine.json'))
conf = config.Config.from_json({
'env_dir': join(tmpdir, 'env'),
'benchmark_dir': join(local, 'benchmark'),
'results_dir': join(tmpdir, 'results_workflow'),
'html_dir': join(tmpdir, 'html'),
'repo': tools.generate_test_repo(tmpdir).path,
'project': 'asv',
'matrix': {
"six": [None],
"colorama": ["0.3.1", "0.3.3"]
}
})
return tmpdir, local, conf
def test_dev(capsys, basic_conf):
tmpdir, local, conf = basic_conf
# Test Dev runs
Dev.run(conf, _machine_file=join(tmpdir, 'asv-machine.json'))
text, err = capsys.readouterr()
# time_with_warnings failure case
assert re.search("File.*time_exception.*RuntimeError", text, re.S)
assert re.search(r"Running time_secondary.track_value\s+42.0", text)
# Check that it did not clone or install
assert "Cloning" not in text
assert "Installing" not in text
def test_run_python_same(capsys, basic_conf):
tmpdir, local, conf = basic_conf
# Test Run runs with python=same
Run.run(conf, _machine_file=join(tmpdir, 'asv-machine.json'), python="same")
text, err = capsys.readouterr()
assert re.search("time_exception.*failed", text, re.S)
assert re.search(r"Running time_secondary.track_value\s+42.0", text)
# Check that it did not clone or install
assert "Cloning" not in text
assert "Installing" not in text
def test_profile_python_same(capsys, basic_conf):
tmpdir, local, conf = basic_conf
# Test Profile can run with python=same
Profile.run(conf, "time_secondary.track_value",
_machine_file=join(tmpdir, 'asv-machine.json'),
python="same")
text, err = capsys.readouterr()
# time_with_warnings failure case
assert re.search(r"^\s+1\s+.*time_secondary.*\(track_value\)", text, re.M)
# Check that it did not clone or install
assert "Cloning" not in text
assert "Installing" not in text
def test_dev_python_arg():
parser, subparsers = make_argparser()
argv = ['dev']
args = parser.parse_args(argv)
assert args.python == 'same'
argv = ['dev', '--python=foo']
args = parser.parse_args(argv)
assert args.python == 'foo'
argv = ['run', 'ALL']
args = parser.parse_ar | gs(argv)
assert args.python is None
def test_run_steps_arg():
parser, subparsers = mak | e_argparser()
argv = ['run', '--steps=20', 'ALL']
args = parser.parse_args(argv)
assert args.steps == 20
if __name__ == '__main__':
test_dev('/tmp')
|
emulbreh/vacuous | vacuous/backends/__init__.py | Python | mit | 995 | 0.007035 | import threading
from django.conf import settings
from django.utils.importlib import import_module
from django.core.signals import request_finished
_cache = threading.local()
def __init():
if not hasattr(_cache, 'backends'):
_cache.backends = {}
def load_backend(flavor, path, cache=True):
__init()
key = (flavor, path)
if key not in _cache.backends or not cache:
import_path = getattr(settings, 'VACUOUS_BACKENDS')[flavor]
| module_path, cls_name = import_path.rsplit('.', 1)
cls = getattr(import_module(module_path), cls_name)
backend = cls(path)
backend.flavor = flavor
if not cache:
return backend
_cache.backends[key] = backend
return _cache.backends[key]
def purge_backend_cache():
__init()
_cache.backends = {}
def iter_cached_backends():
__init()
return _cache.backends.itervalues()
request_finished.connect(lambda sender, * | *kwargs: purge_backend_cache())
|
willowd878/nca47 | nca47/api/hooks/auth_token.py | Python | apache-2.0 | 1,716 | 0.001166 | import re
from keystonemiddleware import auth_token
from oslo_log import log
from nca47.common import exception
from nca47.common.i18n import _
from nca47.common import utils
LOG = log.getLogger(__name__)
class AuthTokenMiddleware(auth_token.AuthProtocol):
"""A wrapper on Keystone auth_token middleware.
Does not perform verification of authentication tokens
for public routes in the API.
"""
def __init__(self, app, conf, public_api_routes=[]):
self._nca_app = app
# TODO(mrda): Remove .xml and ensure that doesn't result in a
# 401 Authentication Required instead of 404 Not Found
route_pattern_tpl = '%s(\.json|\.xml)?$'
try:
| self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl)
for route_tpl in pu | blic_api_routes]
except re.error as e:
msg = _('Cannot compile public API routes: %s') % e
LOG.error(msg)
raise exception.ConfigInvalid(error_msg=msg)
super(AuthTokenMiddleware, self).__init__(app, conf)
def __call__(self, env, start_response):
path = utils.safe_rstrip(env.get('PATH_INFO'), '/')
# The information whether the API call is being performed against the
# public API is required for some other components. Saving it to the
# WSGI environment is reasonable thereby.
env['is_public_api'] = any(map(lambda pattern: re.match(pattern, path),
self.public_api_routes))
if env['is_public_api']:
return self._nca_app(env, start_response)
return super(AuthTokenMiddleware, self).__call__(env, start_response)
|
amwelch/a10sdk-python | a10sdk/core/A10_file/file_ssl_cert_key.py | Python | apache-2.0 | 1,981 | 0.007572 | from a10sdk.common.A10BaseClass import A10BaseClass
class SslCertKey(A10BaseClass):
""" :param action: {"optional": true, "enum": ["create", "import", "export", "copy", "rename", "check", "replace", "delete"], "type": "string", "description": "'create': create; 'import': import; 'export': export; 'copy': copy; 'rename': rename; 'check': check; 'replace': replace; 'delete': delete; ", "format": "enum"}
:param dst_file: {"description": "destination file name for copy and rename action", "format": "string", "minLength": 1, "optional": true, "maxLength": 32, "type": "string"}
:param file_handle: {"description": "full path of the uploaded file", "format": "string-rlx", "minLength": 1, "optional": true, "maxLength": 255, "type": "string"}
:param file: {"description": "ssl certificate local file name", "format": "string", "minLength": 1, "optional": true, "maxLength": 255, "type": "string"}
:param size: {"descriptio | n": "ssl certificate file size in byte", "format": "number", "type": "number", "maximum": 2147483647, "minimum": 0, "optional": true}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
ssl certificate and key file inform | ation and management commands.
Class ssl-cert-key supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/file/ssl-cert-key`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "ssl-cert-key"
self.a10_url="/axapi/v3/file/ssl-cert-key"
self.DeviceProxy = ""
self.action = ""
self.dst_file = ""
self.file_handle = ""
self.A10WW_file = ""
self.size = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
|
gandalfcode/gandalf | tests/paper_tests/jeanstest.py | Python | gpl-2.0 | 9,111 | 0.01339 | #==============================================================================
# jeanstest.py
# Run Noh test using initial conditions file 'noh.dat'.
#==============================================================================
from gandalf.analysis.facade import *
import time
import matplotlib.pyplot as plt
import numpy as np
import math
from matplotlib import rc
from mpl_toolkits.axes_grid1 import AxesGrid
from scipy.optimize import curve_fit
#--------------------------------------------------------------------------------------------------
rc('font', **{'family': 'normal', 'weight' : 'bold', 'size' : 16})
rc('text', usetex=True)
# Set all plot limits
xmin = 0.0
xmax = 1.0
rhomin = 0.95
rhomax = 1.05
stride = 7
sim_no = 0
#tend = 0.05
amp = 0.025
rho0 = 1.0
press0 = 1.0
wavelength = 1.0
mu_bar = 1.0
lmin = 0.05*wavelength
lmax = 1.95*wavelength
ratiomin = 0.0
ratiomax = 2.0
numSimMax = 24
stable_lambdas = []
stable_periods = []
unstable_lambdas = []
unstable_periods = []
stable_wavelengths = np.arange(lmin, 0.999*wavelength, 0.001)
unstable_wavelengths = np.arange(1.001*wavelength, lmax, 0.001)
x_solution = np.arange(0.0, wavelength, 0.001*wavelength)
# Analytical solutions for stable and unstable modes (for function fitting)
#--------------------------------------------------------------------------------------------------
#def | jeans_stable_solution(x, omega, rhofit):
# return rhofit*(1.0 + amp*np.sin(2.0*math.pi*x/wavelength)*np.cos(omega*tend))
#def jeans_unstable_solution(x, omega, rhofit):
# return rho0*(1.0 + amp*np.sin(2.0*math.pi*x/wavelength)*np.cosh(omega*tend))
def oscillation_period(lambda_jeans):
return np.sqrt(np.pi/rho0)*wavelength/np.sqrt(lambda_jeans*lambda_jeans - wavelength*wavelength)
def growth_timescale(lambda_jeans):
return np.sqrt(1/4. | 0/np.pi/rho0)*wavelength/np.sqrt(wavelength*wavelength - lambda_jeans*lambda_jeans)
# Quick sims to generate
periodic_sim = newsim('jeans.dat')
periodic_sim.SetParam('tsnapfirst', 0.0)
periodic_sim.SetParam('temp0', 0.1)
periodic_sim.SetParam('amp', 0.15)
periodic_sim.SetParam('Nstepsmax',1)
setupsim()
run()
# Get grav. data for simulaton for plotting
x_data0 = get_data("x", sim=sim_no)
ax_data0 = get_data("ax", sim=sim_no)
ax_analytical0 = get_analytical_data("x", "ax", sim=sim_no)
sim_no = sim_no + 1
# Quick sims to generate
nonperiodic_sim = newsim('jeans.dat')
nonperiodic_sim.SetParam('tsnapfirst', 0.0)
nonperiodic_sim.SetParam('temp0', 0.1)
nonperiodic_sim.SetParam('amp', 0.15)
nonperiodic_sim.SetParam('Nstepsmax',1)
nonperiodic_sim.SetParam('boundary_lhs[0]','open')
nonperiodic_sim.SetParam('boundary_rhs[0]','open')
nonperiodic_sim.SetParam('boundary_lhs[1]','open')
nonperiodic_sim.SetParam('boundary_rhs[1]','open')
nonperiodic_sim.SetParam('boundary_lhs[2]','open')
nonperiodic_sim.SetParam('boundary_rhs[2]','open')
setupsim()
run()
# Get grav. data for simulaton for plotting
x_data1 = get_data("x", sim=sim_no)
ax_data1 = get_data("ax", sim=sim_no)
sim_no = sim_no + 1
# Create matplotlib figure object with shared x-axis
#--------------------------------------------------------------------------------------------------
fig, axarr = plt.subplots(1, 1, figsize=(7,6), sharex='row')
fig.subplots_adjust(hspace=0.001, wspace=0.001)
fig.subplots_adjust(bottom=0.11, top=0.98, left=0.13, right=0.98)
#axarr[0].set_ylabel(r"$\rho$")
#axarr[0].set_xlim([xmin, xmax])
#axarr[0].plot(rho_analytical.x_data, rho_analytical.y_data, color="red", linestyle='-', lw=0.5)
##axarr[0].plot(x_solution, jeans_unstable_solution(x_solution, *popt), color='blue')
#axarr[0].scatter(x_data[::stride], rho_data[::stride], color='black', marker='.', s=4.0)
axarr.set_ylabel(r"$a_{x}$")
axarr.set_xlabel(r"$x$")
axarr.set_xlim([xmin, xmax])
axarr.set_ylim([-1.9,1.9])
axarr.plot(ax_analytical0.x_data, ax_analytical0.y_data, color="red", linestyle='-', lw=0.5, label='Solution')
axarr.scatter(x_data1[::stride], ax_data1[::stride], color='blue', marker='+', s=8.0, label='No periodic corrections')
axarr.scatter(x_data0[::stride], ax_data0[::stride], color='black', marker='.', s=16.0, label='Ewald corrections')
legend1 = axarr.legend(loc='upper right', fontsize=12)
plt.show()
fig.savefig('jeansaccel.pdf', dpi=50)
# Perform small suite of simulations to compare to expected with dispersion relation.
#--------------------------------------------------------------------------------------------------
for i in range(numSimMax):
# Chose jeans length
ratio = ratiomin + (i + 0.5)*(ratiomax - ratiomin)/numSimMax
if ratio > 0.99 and ratio < 1.01: continue
lambda_jeans = wavelength/ratio
csound = lambda_jeans*math.sqrt(rho0/math.pi)
temp0 = mu_bar*csound*csound
if wavelength > lambda_jeans:
tsim = growth_timescale(lambda_jeans)
else:
tsim = 0.3*oscillation_period(lambda_jeans)
print 'LAMBDA_JEANS : ',lambda_jeans,tsim
sim = newsim('jeans_mfm.dat')
sim.SetParam('tend', tsim)
sim.SetParam('tsnapfirst', tsim)
sim.SetParam('temp0', temp0)
setupsim()
run()
# Get grav. data for simulaton for plotting
x_data = get_data("x", sim=sim_no)
vx_data = get_data("vx", sim=sim_no)
rho_data = get_data("rho", sim=sim_no)
rho_analytical = get_analytical_data("x", "rho", sim=sim_no)
vx_analytical = get_analytical_data("x", "vx", sim=sim_no)
sim_no = sim_no + 1
# Compute best-fit for simulation
if wavelength > lambda_jeans:
omega_analytical = 1.0/growth_timescale(lambda_jeans)
print "OMEGA_ANA : ",omega_analytical
def jeans_unstable_solution(x, omega, rhofit):
return rho0*(1.0 + amp*np.sin(2.0*math.pi*x/wavelength)*np.cosh(omega*tsim))
popt, pcov = curve_fit(jeans_unstable_solution, x_data, rho_data,
bounds=([0.5*omega_analytical, 0.9*rho0], [2.0*omega_analytical, 1.1*rho0]))
unstable_lambdas.append(wavelength/lambda_jeans)
unstable_periods.append(1.0/popt[0])
print 'UNSTABLE SOLUTION : ',popt[0],popt[1]
#fig, axarr = plt.subplots(1, 1, figsize=(7,10), sharex='row')
#axarr.set_ylabel(r"$\rho$")
#axarr.set_xlim([xmin, xmax])
#axarr.plot(rho_analytical.x_data, rho_analytical.y_data, color="red", linestyle='-', lw=0.5)
#axarr.plot(x_solution, jeans_unstable_solution(x_solution, *popt), color='blue')
#axarr.scatter(x_data[::stride], rho_data[::stride], color='black', marker='.', s=4.0)
#plt.show()
#block()
else:
omega_analytical = 2.0*math.pi/oscillation_period(lambda_jeans)
print "OMEGA_ANA : ",omega_analytical
def jeans_stable_solution(x, omega, rhofit):
return rhofit*(1.0 + amp*np.sin(2.0*math.pi*x/wavelength)*np.cos(omega*tsim))
popt, pcov = curve_fit(jeans_stable_solution, x_data, rho_data,
bounds=([0.7*omega_analytical, 0.9*rho0], [1.5*omega_analytical, 1.1*rho0]))
stable_lambdas.append(wavelength/lambda_jeans)
stable_periods.append(2.0*math.pi/popt[0])
print 'STABLE SOLUTION : ',popt[0],popt[1]
#fig, axarr = plt.subplots(1, 1, figsize=(7,10), sharex='row')
#axarr.set_ylabel(r"$\rho$")
#axarr.set_xlim([xmin, xmax])
#axarr.plot(rho_analytical.x_data, rho_analytical.y_data, color="red", linestyle='-', lw=0.5)
#axarr.plot(x_solution, jeans_stable_solution(x_solution, *popt), color='blue')
#axarr.scatter(x_data[::stride], rho_data[::stride], color='black', marker='.', s=4.0)
#plt.show()
#block()
# Dispersion relation figure
#--------------------------------------------------------------------------------------------------
fig2, axarr2 = plt.subplots(1, 1, figsize=(7,5), sharex='row')
fig2.subplots_adjust(hspace=0.001, wspace=0.001)
fig2.subplots_adjust(bottom=0.11, top=0.97, left=0.1, right=0.98)
x_stable_aux = wavelength/stable_wavelengths
y_stable = oscillation_period(x_stable_aux)
x_unstable_aux = wavelength/unstable_wavelengths
y_unstable = growth_times |
jomolinare/kobocat | onadata/apps/logger/models/__init__.py | Python | bsd-2-clause | 437 | 0 | from onadata.apps.logger.models.attachment import Attachment # flake8: noqa
from onadata.apps.logger.models.instance import Instance
from onadata.apps.logger.models.survey_type import SurveyType
from | onadata.apps.logger.models.xform import XForm
from onadata.apps.logger.xform_instance_parser import Instance | ParseError
from onadata.apps.logger.models.ziggy_instance import ZiggyInstance
from onadata.apps.logger.models.note import Note
|
CobwebOrg/cobweb-django | core/admin_inlines.py | Python | mit | 489 | 0 | from dja | ngo.contrib import admin
from django.contrib.contenttypes.admin import GenericStackedInline
from core.models import Organization, Note, CrawlScope
class OrganizationInline(admin.TabularInline):
model = Organization
class NoteInline(GenericStackedInline):
model = Note
fields = ('when_created', 'author', 'visibility', 'text')
readonly_fields = ('when_created',)
| extra = 0
class CrawlScopeInline(admin.StackedInline):
model = CrawlScope
extra = 0
|
LorenaCapo/1DAW_Python | Práctica 2/practica2_exercici8.py | Python | gpl-3.0 | 263 | 0.007605 | #Pràctica 2 - Exercici 8
# Fer un programa on l'usuari introdueixi un preu del producte i aquest
# calcu | li el preu amb IVA (IVA 16%)
precio = int(raw_input("¿Qué precio tiene el producto? "))
total = precio * | 1.16
print "El precio total es: " , total
|
jdepoix/goto_cloud | goto_cloud/urls.py | Python | mit | 767 | 0 | """goto_cloud URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.ur | ls import url, include
2. Add a URL to urlpatterns: url(r'^blo | g/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
jaeilepp/mne-python | mne/beamformer/tests/test_dics.py | Python | bsd-3-clause | 13,250 | 0 | from __future__ import print_function
import warnings
import os.path as op
import copy as cp
from nose.tools import assert_true, assert_raises, assert_equal
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import mne
from mne.datasets import testing
from mne.beamformer import dics, dics_epochs, dics_source_power, tf_dics
from mne.time_frequency import csd_epochs
from mne.externals.six import advance_iterator
from mne.utils import run_tests_if_main
# Note that this is the first test file, this will apply to all subsequent
# tests in a full nosetest:
warnings.simplefilter("always") # ensure we can verify expected warnings
data_path = testing.data_path(download=False)
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_fwd_vol = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_event = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw-eve.fif')
fname_label = op.join(data_path, 'MEG', 'sample', 'labels', 'Aud-lh.label')
def _read_forward_solution_meg(*args, **kwargs):
fwd = mne.read_forward_solution(*args, **kwargs)
return mne.pick_types_forward(fwd, meg=True, eeg=False)
def _get_data(tmin=-0.11, tmax=0.15, read_all_forward=True, compute_csds=True):
"""Read in data used in tests."""
label = mne.read_label(fname_label)
events = mne.read_events(fname_event)[:10]
raw = mne.io.read_raw_fif(fname_raw, preload=False)
raw.add_proj([], remove_existing=True) # we'll subselect so remove proj
forward = mne.read_forward_solution(fname_fwd)
if read_all_forward:
forward_surf_ori = _read_forward_solution_meg(fname_fwd, surf_ori=True)
forward_fixed = _read_forward_solution_meg(fname_fwd, force_fixed=True,
surf_ori=True)
forward_vol = mne.read_forward_solution(fname_fwd_vol, surf_ori=True)
else:
forward_surf_ori = None
forward_fixed = None
forward_vol = None
event_id, tmin, tmax = 1, tmin, tmax
# Setup for reading the raw data
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels
# Set up pick list: MEG - bad channels
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False,
stim=True, eog=True, exclude='bads',
selection=left_temporal_channels)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
| picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
epochs.resample(200, npad=0, n_jobs=2)
evoked = epochs.average().crop(0, None)
# Computing the data and noise cross-spectral den | sity matrices
if compute_csds:
data_csd = csd_epochs(epochs, mode='multitaper', tmin=0.045,
tmax=None, fmin=8, fmax=12,
mt_bandwidth=72.72)
noise_csd = csd_epochs(epochs, mode='multitaper', tmin=None,
tmax=0.0, fmin=8, fmax=12,
mt_bandwidth=72.72)
else:
data_csd, noise_csd = None, None
return raw, epochs, evoked, data_csd, noise_csd, label, forward,\
forward_surf_ori, forward_fixed, forward_vol
@testing.requires_testing_data
def test_dics():
"""Test DICS with evoked data and single trials."""
raw, epochs, evoked, data_csd, noise_csd, label, forward,\
forward_surf_ori, forward_fixed, forward_vol = _get_data()
epochs.crop(0, None)
reg = 0.5 # Heavily regularize due to low SNR
for real_filter in (True, False):
stc = dics(evoked, forward, noise_csd=noise_csd, data_csd=data_csd,
label=label, real_filter=real_filter, reg=reg)
stc_pow = np.sum(stc.data, axis=1)
idx = np.argmax(stc_pow)
max_stc = stc.data[idx]
tmax = stc.times[np.argmax(max_stc)]
# Incorrect due to limited number of epochs
assert_true(0.04 < tmax < 0.06, msg=tmax)
assert_true(3. < np.max(max_stc) < 6., msg=np.max(max_stc))
# Test picking normal orientation
stc_normal = dics(evoked, forward_surf_ori, noise_csd, data_csd,
pick_ori="normal", label=label, real_filter=True,
reg=reg)
assert_true(stc_normal.data.min() < 0) # this doesn't take abs
stc_normal = dics(evoked, forward_surf_ori, noise_csd, data_csd,
pick_ori="normal", label=label, reg=reg)
assert_true(stc_normal.data.min() >= 0) # this does take abs
# The amplitude of normal orientation results should always be smaller than
# free orientation results
assert_true((np.abs(stc_normal.data) <= stc.data).all())
# Test if fixed forward operator is detected when picking normal
# orientation
assert_raises(ValueError, dics_epochs, epochs, forward_fixed, noise_csd,
data_csd, pick_ori="normal")
# Test if non-surface oriented forward operator is detected when picking
# normal orientation
assert_raises(ValueError, dics_epochs, epochs, forward, noise_csd,
data_csd, pick_ori="normal")
# Test if volume forward operator is detected when picking normal
# orientation
assert_raises(ValueError, dics_epochs, epochs, forward_vol, noise_csd,
data_csd, pick_ori="normal")
# Now test single trial using fixed orientation forward solution
# so we can compare it to the evoked solution
stcs = dics_epochs(epochs, forward_fixed, noise_csd, data_csd, label=label)
# Testing returning of generator
stcs_ = dics_epochs(epochs, forward_fixed, noise_csd, data_csd,
return_generator=True, label=label)
assert_array_equal(stcs[0].data, advance_iterator(stcs_).data)
# Test whether correct number of trials was returned
epochs.drop_bad()
assert_true(len(epochs.events) == len(stcs))
# Average the single trial estimates
stc_avg = np.zeros_like(stc.data)
for this_stc in stcs:
stc_avg += this_stc.data
stc_avg /= len(stcs)
idx = np.argmax(np.max(stc_avg, axis=1))
max_stc = stc_avg[idx]
tmax = stc.times[np.argmax(max_stc)]
assert_true(0.120 < tmax < 0.150, msg=tmax) # incorrect due to limited #
assert_true(12 < np.max(max_stc) < 18.5)
@testing.requires_testing_data
def test_dics_source_power():
"""Test DICS source power computation."""
raw, epochs, evoked, data_csd, noise_csd, label, forward,\
forward_surf_ori, forward_fixed, forward_vol = _get_data()
epochs.crop(0, None)
reg = 0.05
stc_source_power = dics_source_power(epochs.info, forward, noise_csd,
data_csd, label=label, reg=reg)
max_source_idx = np.argmax(stc_source_power.data)
max_source_power = np.max(stc_source_power.data)
# TODO: Maybe these could be more directly compared to dics() results?
assert_true(max_source_idx == 1)
assert_true(0.004 < max_source_power < 0.005, msg=max_source_power)
# Test picking normal orientation and using a list of CSD matrices
stc_normal = dics_source_power(epochs.info, forward_surf_ori,
[noise_csd] * 2, [data_csd] * 2,
pick_ori="normal", label=label, reg=reg)
assert_true(stc_normal.data.shape == (stc_source_power.data.shape[0], 2))
# The normal orientation results should always be smaller than free
# orientation results
assert_true((np.abs(stc_normal.data[:, 0]) <=
stc_source_power.data[:, 0]).all())
# Test if fixed forward operator is detected when picking normal
# orientation
assert_raises(ValueError, dics_source_power, raw.info, forward_fixed,
noise_csd, data_csd, pick_ori="normal")
# Test if non-surface o |
hankcs/HanLP | plugins/hanlp_demo/hanlp_demo/zh/tf/train/train_ptb_dep_sa_albert_topk.py | Python | apache-2.0 | 1,269 | 0.001576 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-03-07 23:48
from hanlp.metrics.parsing import conllx_eval
from hanlp.datasets.parsing.ptb import PTB_SD330_DEV, PTB_SD330_TRAIN, PTB_SD330_TEST, PTB_TOKEN_MAPPING
from hanlp.components.parsers.biaffine_parser_tf import BiaffineTransformerDependencyParserTF, \
StructuralAttentionDependencyParserTF
from hanlp.pretrained.glove import GLOVE_840B_300D
from tests import cdroot
cdroot()
save_dir = 'data/model/dep/ptb_sa_topk'
parser = StructuralAttentionDependencyParserTF()
parser.fit(PTB_SD330_TRAIN, PTB_SD330_DEV, save_dir, 'bert-base-uncased',
batch_size=3000,
warmup_steps_ratio=.1,
token_mapping=PTB_TOKEN_MAPPING,
samples_per_batch=150,
transformer_dropout=.33,
masked_lm_dropout=.33,
learning_rate=2e-3,
learni | ng_rate_transformer=1e-5,
# alpha=1,
# early_stopping_patience=10,
| # num_decoder_layers=2,
)
parser.load(save_dir)
# output = f'{save_dir}/test.predict.conll'
parser.evaluate(PTB_SD330_TEST, save_dir, warm_up=False)
# uas, las = conllx_eval.evaluate(PTB_SD330_TEST, output)
# print(f'Official UAS: {uas:.4f} LAS: {las:.4f}')
print(f'Model saved in {save_dir}')
|
elena/django | tests/postgres_tests/test_operations.py | Python | bsd-3-clause | 19,320 | 0.002381 | import unittest
from unittest import mock
from migrations.test_base import OperationTestBase
from django.db import NotSupportedError, connection
from django.db.migrations.state import ProjectState
from django.db.models import Index
from django.db.utils import ProgrammingError
from django.test import modify_settings, override_settings, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from . import PostgreSQLTestCase
try:
from django.contrib.postgres.indexes import BrinIndex, BTreeIndex
from django.contrib.postgres.operations import (
AddIndexConcurrently, BloomExtension, CreateCollation, CreateExtension,
RemoveCollation, RemoveIndexConcurrently,
)
except ImportError:
pass
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific tests.')
@modify_settings(INSTALLED_APPS={'append': 'migrations'})
class AddIndexConcurrentlyTests(OperationTestBase):
app_label = 'test_add_concurrently'
def test_requires_atomic_false(self):
project_state = self.set_up_test_model(self.app_label)
new_state = project_state.clone()
operation = AddIndexConcurrently(
'Pony',
Index(fields=['pink'], name='pony_pink_idx'),
)
msg = (
'The AddIndexConcurrently operation cannot be executed inside '
'a transaction (set atomic = False on the migration).'
)
with self.assertRaisesMessage(NotSupportedError, msg):
with connection.schema_editor(atomic=True) as editor:
operation.database_forwards(self.app_label, editor, project_state, new_state)
def test_add(self):
project_state = self.set_up_test_model(self.app_label, index=False)
table_name = '%s_pony' % self.app_label
index = Index(fields=['pink'], name='pony_pink_idx')
new_state = project_state.clone()
operation = AddIndexConcurrently('Pony', index)
self.assertEqual(
operation.describe(),
'Concurrently create index pony_pink_idx on field(s) pink of '
'model Pony'
)
operation.state_forwards(self.app_label, new_state)
self.assertEqual(len(new_state.models[self.app_label, 'pony'].options['indexes']), 1)
self.assertIndexNotExists(table_name, ['pink'])
# Add index.
with connection.schema_editor(atomic=False) as editor:
operation.database_forwards(self.app_label, editor, project_state, new_state)
self.assertIndexExists(table_name, ['pink'])
# Reversal.
with connection.schema_editor(atomic=False) as editor:
operation.database_backwards(self.app_label, editor, new_state, project_state)
self.assertIndexNotExists(table_name, ['pink'])
# Deconstruction.
name, args, kwargs = operation.deconstruct()
self.assertEqual(name, 'AddIndexConcurrently')
self.assertEqual(args, [])
self.assertEqual(kwargs, {'model_name': 'Pony', 'index': index})
def test_add_other_index_type(self):
project_state = self.set_up_test_model(self.app_label, index=False)
table_name = '%s_pony' % self.app_label
new_state = project_state.clone()
operation = AddIndexConcurrently(
'Pony',
BrinIndex(fields=['pink'], name='pony_pink_brin_idx'),
)
self.assertIndexNotExists(table_name, ['pink'])
# Add index.
with connection.schema_editor(atomic=False) as editor:
operation.database_forwards(self.app_label, editor, project_state, new_state)
self.assertIndexExists(table_name, ['pink'], index_type='brin')
# Reversal.
with connection.schema_editor(atomic=False) as editor:
operation.database_backwards(self.app_label, editor, new_state, project_state)
self.assertIndexNotExists(table_name, ['pink'])
def test_add_with_options(self):
project_state = self.set_up_test_model(self.app_label, index=False)
table_name = '%s_pony' % self.app_label
new_state = project_state.clone()
index = BTreeIndex(fields=['pink'], name='pony_pink_btree_idx', fillfactor=70)
operation = AddIndexConcurrently('Pony', index)
self.assertIndexNotExists(table_name, ['pink'])
# Add index.
with connection.schema_editor(atomic=False) as editor:
operation.database_forwards(self.app_label, editor, project_state, new_state)
self.assertIndexExists(table_name, ['pink'], index_type='btree')
# Reversal.
with connection.schema_editor(atomic=False) as editor:
operation.database_backwards(self.app_label, editor, new_state, project_state)
self.assertIndexNotExists(table_name, ['pink'])
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific tests.')
@modify_settings(INSTALLED_APPS={'append': 'migrations'})
class RemoveIndexConcurrentlyTests(OperationTestBase):
app_label = 'test_rm_concurrently'
def test_requires_atomic_false(self):
project_state = self.set_up_test_model(self.app_label, index=True)
new_state = project_state.clone()
operation = RemoveIndexConcurrently('Pony', 'pony_pink_idx')
msg = (
'The RemoveIndexConcurrently operation cannot be executed inside '
'a transaction (set atomic = False on the migration).'
)
with self.assertRaisesMessage(NotSupportedError, msg):
with connection.schema_editor(atomic=True) as editor:
operation.database_forwards(self.app_label, editor, project_state, new_state)
def test_remove(self):
project_state = self.set_up_test_model(self.app_label, index=True)
table_name = '%s_pony' % self.app_label
self.assertTableExists(table_name)
new_state = project_state.clone()
operation = RemoveIndexConcurrently('Pony', 'pony_pink_idx')
self.assertEqual(
operation.describe(),
'Concurrently remove index pony_pink_idx from Pony',
)
operation.state_forwards(self.app_label, new_state)
self.assertEqual(len(new_state.models[self.app_label, 'pony'].options['indexes']), 0)
self.assertIndexExists(table_name, ['pink'])
# Remove index.
with connection.schema_editor(atomic=False) as editor:
operation.database_forwards(self.app_label, editor, project_state, new_state)
self.assertIndexNotExists(table_name, ['pink'])
# Reversal.
with connection.schema_editor(atomic=False) as editor:
operation.database_backwards(self.app_label, editor, new_state, project_state)
self.assertIndexExists(table_name, ['pink'])
# Deconstruction.
name, args, kwargs = operation.deconstruct()
self.assertEqual(name, 'RemoveIndexConcurrently')
self.assertEqual(args, [])
self.assertEqual(kwargs, {'model_name': 'Pony', 'name': 'pony_pink_idx'})
class NoMigrationRouter():
def allow_migrate(self, db, app_label, **hints):
return False
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL specific tests.')
class CreateExtensionTests(PostgreSQLTestCase):
app_label = 'test_allow_create_extention'
@override_settings(DATABASE_ROUTERS=[NoMigrationRouter()])
def test_no_allow_migrate(self):
operation = CreateExtension('tablefunc')
project_state = ProjectState()
new_state = project_state.clone()
# Don't create an extension.
with CaptureQueriesContext(connection) as captured_queries:
with connection.schema_editor(atomic=False) as editor:
operation.database_forwards(self | .app_label, editor, project_state, new_state)
self.assertEqual(len(captured_queries), 0)
# Reversal.
with CaptureQueriesContext(connecti | on) as captured_queries:
with connection.schema_editor(atomic=False) as editor:
operation.database_backwards(self.app_label, editor, new_state, project_state)
self.assertEqual(len(captured_queries), 0)
def test_allow_migrate( |
jonathanmorgan/reddit_data | migrations/0006_auto__add_field_subreddit_time_series_data_create_date__add_field_subr.py | Python | gpl-3.0 | 20,772 | 0.00804 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Subreddit_Time_Series_Data.create_date'
db.add_column(u'reddit_data_subreddit_time_series_data', 'create_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2013, 9, 10, 0, 0), blank=True),
keep_default=False)
# Adding field 'Subreddit_Time_Series_Data.last_update'
db.add_column(u'reddit_data_subreddit_time_series_data', 'last_update',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2013, 9, 10, 0, 0), blank=True),
keep_default=False)
# Adding field 'Domain_Time_Series_Data.create_date'
db.add_column(u'reddit_data_domain_time_series_data', 'create_date',
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2013, 9, 10, 0, 0), blank=True),
keep_default=False)
# Adding field 'Domain_Time_Series_Data.last_update'
db.add_column(u'reddit_data_domain_time_series_data', 'last_update',
self.gf('django.db.models.fields.DateTimeField')(auto_now=True, default=datetime.datetime(2013, 9, 10, 0, 0), blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Subreddit_Time_Series_Data.create_date'
db.delete_column(u'reddit_data_subreddit_time_series_data', 'create_date')
# Deleting field 'Subreddit_Time_Series_Data.last_update'
db.delete_column(u'reddit_data_subreddit_time_series_data', 'last_update')
# Deleting field 'Domain_Time_Series_Data.create_date'
db.delete_column(u'reddit_data_domain_time_series_data', 'create_date')
# Deleting field 'Domain_Time_Series_Data.last_update'
db.delete_column(u'reddit_data_domain_time_series_data', 'last_update')
models = {
u'django_time_series.time_period': {
'Meta': {'object_name': 'Time_Period'},
'aggregate_index': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project_category': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'project_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'time_period_category': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'time_period_index': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_period_label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'time_period_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'reddit_collect.domain': {
'Meta': {'object_name': 'Domain'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_multimedia': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_news': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_self_post': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'long_name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'use_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'reddit_collect.subreddit': {
'Meta': {'object_name': 'Subreddit'},
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created_dt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created_utc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_utc_dt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'} | ),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'filter_1': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'filter_10': ('django.db.models.fields.BooleanField', [], {' | default': 'False'}),
'filter_2': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'filter_3': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'filter_4': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'filter_5': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'filter_6': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'filter_7': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'filter_8': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'filter_9': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'header_title': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'over_18': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'public_desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reddit_full_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'reddit_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'subscribers': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'reddit_data.domain_time_series_data': {
'Meta': {'object_name': 'Domain_Time_Series_Data'},
'aggregate_index': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['reddit_collect.Domain']", 'null': 'True', 'blank': 'True'}),
'domain_long_name': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'domain_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'downvotes_average': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '1 |
sorenh/cc | vendor/Twisted-10.0.0/twisted/lore/man2lore.py | Python | apache-2.0 | 7,773 | 0.003988 | # -*- test-case-name: twisted.lore.test.test_man2lore -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
man2lore: Converts man page source (i.e. groff) into lore-compatible html.
This is nasty and hackish (and doesn't support lots of real groff), but is good
enough for converting fairly simple man pages.
"""
import re, os
quoteRE = re.compile('"(.*?)"')
def escape(text):
text = text.replace('<', '<').replace('>', '>')
text = quoteRE.sub('<q>\\1</q>', text)
return text
def stripQuotes(s):
if s[0] == s[-1] == '"':
s = s[1:-1]
return s
class ManConverter(object):
"""
Convert a man page to the Lore format.
@ivar tp: State variable for handling text inside a C{TP} token. It can
take values from 0 to 3:
- 0: when outside of a C{TP} token.
- 1: once a C{TP} token has been encountered. If the previous value
was 0, a definition list is started. Then, at the first line of
text, a definition term is started.
- 2: when the first line after the C{TP} token has been handled.
The definition term is closed, and a definition is started with
the next line of text.
- 3: when the first line as definition data has been handled.
@type tp: C{int}
"""
state = 'regular'
name = None
tp = 0
dl = 0
para = 0
def convert(self, inf, outf):
self.write = outf.write
longline = ''
for line in inf.readlines():
if line.rstrip() and line.rstrip()[-1] == '\\':
longline += line.rstrip()[:-1] + ' '
continue
if longline:
line = longline + line
longline = ''
self.lineReceived(line)
self.closeTags()
self.write('</body>\n</html>\n')
outf.flush()
def lineReceived(self, line):
if line[0] == '.':
f = getattr(self, 'macro_' + line[1:3].rstrip().upper(), None)
if f:
f(line[3:].strip())
else:
self.text(line)
def continueReceived(self, cont):
if not cont:
return
if cont[0].isupper():
f = getattr(self, 'macro_' + cont[:2].rstrip().upper(), None)
if f:
f(cont[2:].strip())
else:
self.text(cont)
def closeTags(self):
if self.state != 'regular':
self.write('</%s>' % self.state)
if self.tp == 3:
self.write('</dd>\n\n')
self.tp = 0
if self.dl:
self.write('</dl>\n\n')
self.dl = 0
if self.para:
self.write('</p>\n\n')
self.para = 0
def paraCheck(self):
if not self.tp and not self.para:
self.write('<p>')
self.para = 1
def macro_TH(self, line):
self.write(
'<?xml version="1.0"?>\n'
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"\n'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')
self.write('<html><head>\n')
parts = [stripQuotes(x) for x in line.split(' ', 2)] + ['', '']
title, manSection = parts[:2]
self.write('<title>%s.%s</title>' % (title, manSection))
self.write('</head>\n<body>\n\n')
self.write('<h1>%s.%s</h1>\n\n' % (title, manSection))
macro_DT = macro_TH
def macro_SH(self, line):
self.closeTags()
self.write('<h2>')
self.para = 1
self.text(stripQuotes(line))
self.para = 0
self.closeTags()
self.write('</h2>\n\n')
def macro_B(self, line):
words = line.split()
words[0] = '\\fB' + words[0] + '\\fR '
self.text(' '.join(words))
def macro_NM(self, line):
if not self.name:
self.name = line
self.text(self.name + ' ')
def macro_NS(self, line):
parts = line.split(' Ns ')
i = 0
for l in parts:
i = not i
if i:
self.text(l)
else:
self.continueReceived(l)
def macro_OO(self, line):
self.text('[')
self.continueReceived(line)
def macro_OC(self, line):
self.text(']')
self.continueReceived(line)
def macro_OP(self, line):
self.text('[')
self.continueReceived(line)
self.text(']')
def macro_FL(self, line):
parts = line.split()
self.text('\\fB-%s\\fR' % parts[0])
self.continueReceived(' '.join(parts[1:]))
def macro_AR(self, line):
parts = line.split()
self.text('\\fI %s\\fR' % parts[0])
self.continueReceived(' '.join(parts[1:]))
def macro_PP(self, line):
self.closeTags()
def macro_IC(self, line):
cmd = line.split(' ', 1)[0]
args = line[line.index(cmd) + len(cmd):]
args = args.split(' ')
text = cmd
while args:
arg = args.pop(0)
if arg.lower() == "ar":
text += " \\fU%s\\fR" % (args.pop(0),)
elif arg.lower() == "op":
ign = args.pop(0)
text += " [\\fU%s\\fR]" % (args.pop(0),)
self.text(text)
def macro_TP(self, line):
"""
Handle C{TP} token: start a definition list if it's first token, or
close previous definition data.
"""
if self.tp == 3:
self.write('</dd>\n\n')
self.tp = 1
else:
self.tp = 1
self.write('<dl>')
self.dl = 1
def macro_BL(self, line):
self.write('<dl>')
self.tp = 1
def macro_EL(self, line):
if self.tp == 3:
self.write('</dd>')
self.tp = 1
self.write('</dl>\n\n')
self.tp = 0
def macro_IT(self, line):
if self.tp == 3:
self.write('</dd>')
self.tp = 1
self.continueReceived(line)
def text(self, line):
"""
Handle a line of text without detected token.
"""
if self.tp == 1:
se | lf.write('<dt>')
if self.tp == 2:
self.write('<dd>')
self.paraCheck()
bits = line.split('\\')
self.write(escape(bits[0]))
for bit | in bits[1:]:
if bit[:2] == 'fI':
self.write('<em>' + escape(bit[2:]))
self.state = 'em'
elif bit[:2] == 'fB':
self.write('<strong>' + escape(bit[2:]))
self.state = 'strong'
elif bit[:2] == 'fR':
self.write('</%s>' % self.state)
self.write(escape(bit[2:]))
self.state = 'regular'
elif bit[:2] == 'fU':
# fU doesn't really exist, but it helps us to manage underlined
# text.
self.write('<u>' + escape(bit[2:]))
self.state = 'u'
elif bit[:3] == '(co':
self.write('©' + escape(bit[3:]))
else:
self.write(escape(bit))
if self.tp == 1:
self.write('</dt>')
self.tp = 2
elif self.tp == 2:
self.tp = 3
class ProcessingFunctionFactory:
def generate_lore(self, d, filenameGenerator=None):
ext = d.get('ext', '.html')
return lambda file,_: ManConverter().convert(open(file),
open(os.path.splitext(file)[0]+ext, 'w'))
factory = ProcessingFunctionFactory()
if __name__ == '__main__':
import sys
mc = ManConverter().convert(open(sys.argv[1]), sys.stdout)
|
jeremiah-c-leary/vhdl-style-guide | vsg/tests/attribute_specification/test_rule_500.py | Python | gpl-3.0 | 2,176 | 0.003676 |
import os
import unittest
from vsg.rules import attribute_specification
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError =vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir,'rule_500_test_input.vhd'))
lExpected_lower = []
lExpected_lower.append('')
utils.read_file(os.path.join(sTestDir, 'rule_500_test_input.fixed_lower.vhd'), lExpected_lower)
lExpected_upper = []
lExpected_upper.append('')
utils.read_file(os.path.join(sTestDir, 'rule_500_test_input.fixed_upper.vhd'), lExpected_upper)
class test_attribute_specification_statement_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_500_lower(self):
oRule = attribute_specification.rule_500()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'attribute_specification')
self.assertEqual(oRule.identifier, '500')
lExpected = [6]
|
oRule.analyze(self.oFile)
self.assertEqual(utils.extract_violation_lines_from_violation_object(oRule.violations), lExpected)
def test_rule_500_upper(self):
oRule = attribute_specification.rule_500()
oRule.case = 'upper | '
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'attribute_specification')
self.assertEqual(oRule.identifier, '500')
lExpected = [4]
oRule.analyze(self.oFile)
self.assertEqual(utils.extract_violation_lines_from_violation_object(oRule.violations), lExpected)
def test_fix_rule_500_lower(self):
oRule = attribute_specification.rule_500()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected_lower, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
def test_fix_rule_500_upper(self):
oRule = attribute_specification.rule_500()
oRule.case = 'upper'
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected_upper, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
|
susingha/0x_tools | programs/pycharm/py_classptr.py | Python | gpl-2.0 | 701 | 0.002853 | import random
class node:
def __init__(self, val):
self.val = val
self.random = random.randint(20, 50)
self.ptr = None
arr = []
arr.append(node(0))
arr.append(node(1))
arr.append(node(2))
arr.append(node(3))
arr.append(node(4))
# check the list
print "before modification"
for n in arr:
print n.val, n.random, n.ptr, n
# add them to a hashtable
hasht = {}
for n in arr:
hasht[n.val] = n
# change a value on a node in the hashta | ble
n = hasht[2]
n.random = 100
# change a value on a node in the hashtable directly
hasht[1].random = 500
print
print "after modifica | tion in hashtable 1"
# check the list again
for n in arr:
print n.val, n.random, n.ptr, n
|
LinkageIO/LocusPocus | tests/test_Chromosome.py | Python | mit | 1,038 | 0 | import pytest
from locuspocus import Chromosome
@pytest.fixture
def chr1():
return Chromosome("chr1", "A" * 500000)
@pytest.fixture
def chr2():
return Chromosome("chr1", "C" * 500000)
def test_init(chr1):
assert chr1
def test_init_from_seq():
x = Chromosome("chr1", ["a", "c", "g", "t"])
assert True
def test_slice(chr1):
x = chr1[1:100]
assert len(x) == 100
def test_invalid_slice(chr1):
with pytest.raises(ValueError):
chr1[0:100]
def test_get_bp(chr1):
assert chr1[12] == "A"
def test_get_bp_invalid_coordinate(chr1):
with pytest.raises(ValueError):
chr1[0]
def test_repr(chr1):
assert repr(chr1) == "Chromosome('AAAAAAAAAAAA...AAAAAAAAAAAAA')"
def test_equals(chr1, chr2):
x = chr1
y = chr2
assert x == x
assert x != y
def test_N_in_chromosome():
Chromosome("test", "aaacccgggtttAN")
assert True
def test_bad_nucleotide_in_chromosome_seq():
with pytest.raises(KeyError): |
Chromo | some("test", "abcd")
assert True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.