Dataset schema (one record per file):

    repo_name  string     lengths 5 to 100
    path       string     lengths 4 to 231
    language   string     1 distinct value
    license    string     15 distinct values
    size       int64      6 to 947k
    score      float64    0 to 0.34
    prefix     string     lengths 0 to 8.16k
    middle     string     lengths 3 to 512
    suffix     string     lengths 0 to 8.17k
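Each record below carries these nine fields, with the file body split into prefix / middle / suffix segments (a fill-in-the-middle style split, judging by the column names and length caps). A minimal sketch of iterating such a dump, assuming a Hugging Face-style `datasets` layout; the dataset name here is hypothetical:

from datasets import load_dataset  # assumes the `datasets` library is available

ds = load_dataset("user/python-fim-snippets", split="train")  # hypothetical name
for row in ds:
    # reassemble the file body from the three FIM segments
    source = row["prefix"] + row["middle"] + row["suffix"]
    print(row["repo_name"], row["path"], row["license"], len(source))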
moment-of-peace/EventForecast
association_rule/event_frequent.py
Python
lgpl-3.0
1,535
0.001954
# Count per-(country, state) event-code frequencies from tab-separated
# event dumps and write one frequency file per input file.
import os, sys

origin_dir = 'del_201304now/'
new_dir = 'freq_event_state/'
files = os.listdir(origin_dir)
state_dir = {}
country_dir = {}
for file in files:
    with open(origin_dir + file) as f:
        event_dir = {}
        for line in f:
            tmp_content = line.split('\t')
            code = tmp_content[4]
            location = tmp_content[14]
            tmp_loc = location.split(',')
            length = len(tmp_loc)
            state = ''
            if length == 3:
                state = tmp_loc[1]
            elif length == 2:
                state = tmp_loc[0]
            else:
                continue
            country = tmp_loc[length-1]
            if country not in country_dir:
                country_dir[country] = {}
            if state in country_dir[country]:
                tmp_dir = country_dir[country][state]
                if code in tmp_dir:
                    tmp_dir[code] += 1
                else:
                    tmp_dir[code] = 1
            else:
                country_dir[country][state] = {}
                country_dir[country][state][code] = 1
    for country_name, countries in country_dir.items():
        for state_name, states in countries.items():
            dir_path = '%s%s/%s/' % (new_dir, country_name, state_name)
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            with open(dir_path + file, 'a') as writer:
                for event, freq in states.items():
                    writer.write(event + ': ' + str(freq) + '\n')
whd/python_moztelemetry
tests/test_spark.py
Python
mpl-2.0
5,208
0.002304
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.

import json
import os
from string import Template
from uuid import uuid4

import pytest

from moztelemetry.store import InMemoryStore
from moztelemetry.dataset import Dataset
from moztelemetry.spark import get_pings


@pytest.fixture()
def test_store(monkeypatch):
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    with open(os.path.join(data_dir, 'schema.json')) as s:
        schema = json.loads(s.read())
    dimensions = [f['field_name'] for f in schema['dimensions']]
    dataset = Dataset('test-bucket', dimensions, InMemoryStore('test-bucket'))

    @staticmethod
    def from_source(source_name):
        return dataset

    monkeypatch.setattr(Dataset, 'from_source', from_source)
    return dataset.store


def upload_ping(store, value, **kwargs):
    """Upload value to a given store"""
    ping_key_template = Template('$submission_date/$source_name/'
                                 '$source_version/$doc_type/$app/$channel/'
                                 '$version/$build_id/$filename')
    dimensions = {
        'submission_date': '20160805',
        'source_name': 'telemetry',
        'source_version': '4',
        'doc_type': 'saved_session',
        'app': 'Firefox',
        'channel': 'nightly',
        'version': '51.0a1',
        'build_id': '20160801074053',
        'filename': uuid4()
    }
    dimensions.update(kwargs)
    key = ping_key_template.substitute(**dimensions)
    store.store[key] = value


@pytest.fixture
def mock_message_parser(monkeypatch):
    # monkeypatch the default `decoder` argument of `records`
    monkeypatch.setattr('moztelemetry.heka_message_parser.parse_heka_message',
                        lambda message: (message.getvalue(),))


test_data_for_exact_match = [
    ('doc_type', 'saved_session', 'main'),
    ('app', 'Firefox', 'Thunderbird'),
    ('version', '48.0', '46.0'),
    ('source_name', 'telemetry', 'other source'),
    ('source_version', '4', '2'),
]


@pytest.mark.slow
@pytest.mark.parametrize('filter_name,exact,wrong', test_data_for_exact_match)
def test_get_pings_by_exact_match(test_store, mock_message_parser,
                                  spark_context, filter_name, exact, wrong):
    upload_ping(test_store, 'value1', **{filter_name: exact})
    upload_ping(test_store, 'value2', **{filter_name: wrong})
    pings = get_pings(spark_context, **{filter_name: exact})
    assert pings.collect() == ['value1']


test_data_for_range_match = [
    ('submission_date', '20160110', '20150101', '20160101', '20160120'),
    ('build_id', '20160801074050', '20160801074055', '20160801074049',
     '20160801074052'),
]


@pytest.mark.slow
@pytest.mark.parametrize('filter_name,exact,wrong,start,end',
                         test_data_for_range_match)
def test_get_pings_by_range(test_store, mock_message_parser, spark_context,
                            filter_name, exact, wrong, start, end):
    upload_ping(test_store, 'value1', **{filter_name: exact})
    upload_ping(test_store, 'value2', **{filter_name: wrong})
    pings = get_pings(spark_context, **{filter_name: exact})
    assert pings.collect() == ['value1']
    pings = get_pings(spark_context, **{filter_name: (start, end)})
    assert pings.collect() == ['value1']


@pytest.mark.slow
def test_get_pings_multiple_by_range(test_store, mock_message_parser,
                                     spark_context):
    upload_ping(test_store, 'value1',
                **{f[0]: f[1] for f in test_data_for_range_match})
    upload_ping(test_store, 'value2',
                **{f[0]: f[2] for f in test_data_for_range_match})
    pings = get_pings(spark_context,
                      **{f[0]: f[1] for f in test_data_for_range_match})
    assert pings.collect() == ['value1']
    pings = get_pings(spark_context,
                      **{f[0]: (f[3], f[4]) for f in test_data_for_range_match})
    assert pings.collect() == ['value1']


def test_get_pings_fraction(test_store, mock_message_parser, spark_context):
    for i in range(1, 10+1):
        upload_ping(test_store, 'value', build_id=str(i))
    pings = get_pings(spark_context)
    assert pings.count() == 10
    pings = get_pings(spark_context, fraction=0.1)
    assert pings.count() == 1


def test_get_pings_wrong_schema(test_store, mock_message_parser,
                                spark_context):
    with pytest.raises(ValueError):
        pings = get_pings(spark_context, schema=1)


def test_get_pings_multiple_filters(test_store, mock_message_parser,
                                    spark_context):
    filters = dict(submission_date='20160101', channel='beta')
    upload_ping(test_store, 'value1', **filters)
    filters['app'] = 'Thunderbird'
    upload_ping(test_store, 'value2', **filters)
    pings = get_pings(spark_context, **filters)
    assert pings.collect() == ['value2']


def test_get_pings_none_filter(test_store, mock_message_parser, spark_context):
    upload_ping(test_store, 'value1', app='Firefox')
    upload_ping(test_store, 'value2', app='Thunderbird')
    pings = get_pings(spark_context, app=None)
    assert sorted(pings.collect()) == ['value1', 'value2']
    pings = get_pings(spark_context, app='*')
    assert sorted(pings.collect()) == ['value1', 'value2']
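For reference, upload_ping above expands its key template with the default dimensions, so with no overrides a stored key reads (the final component is a random UUID):

    20160805/telemetry/4/saved_session/Firefox/nightly/51.0a1/20160801074053/<uuid4>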
jokedurnez/RequiredEffectSize
SampleSize/filter_fMRI_terms.py
Python
mit
831
0.01444
# check for specific mention of fMRI-related terms
# to filter out non-fMRI papers


def filter_fMRI_terms(pmids,
                      fmri_terms=['fMRI', 'functional MRI',
                                  'functional magnetic resonance'],
                      usemesh=False):
    """ return pmids that include fMRI-related terms in MESH keywords or abstract or title"""
    good_pmids = {}
    for pmid in pmids.keys():
        goodkey = 0
        if usemesh:
            if 'MeshTerms' in pmids[pmid].keys():
                if 'Magnetic Resonance Imaging' in pmids[pmid]['MeshTerms']:
                    goodkey = 1
        for t in fmri_terms:
            if pmids[pmid]['Abstract'][0].find(t) > -1:
                goodkey = 1
            if pmids[pmid]['Title'].find(t) > -1:
                goodkey = 1
        if goodkey:
            good_pmids[pmid] = pmids[pmid]
    return good_pmids
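The pmids argument is a dict of records keyed by PubMed ID; a minimal hypothetical call, with the record shape inferred from the lookups in the function body:

# record shape inferred from the function; all values here are made up
pmids = {
    '12345': {'Title': 'An fMRI study of reward',
              'Abstract': ['We used functional MRI to measure ...'],
              'MeshTerms': ['Magnetic Resonance Imaging']},
    '67890': {'Title': 'A behavioural study',
              'Abstract': ['No imaging was performed.'],
              'MeshTerms': []},
}
kept = filter_fMRI_terms(pmids)
assert list(kept.keys()) == ['12345']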
alxgu/ansible
lib/ansible/modules/cloud/amazon/aws_ses_identity_policy.py
Python
gpl-3.0
7,303
0.00356
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = '''
---
module: aws_ses_identity_policy
short_description: Manages SES sending authorization policies
description:
    - This module allows the user to manage sending authorization policies associated with an SES identity (email or domain).
    - SES authorization sending policies can be used to control what actors are able to send email
      on behalf of the validated identity and what conditions must be met by the sent emails.
version_added: "2.6"
author: Ed Costello (@orthanc)

options:
    identity:
        description: |
            The SES identity to attach or remove a policy from. This can be either the full ARN or just
            the verified email or domain.
        required: true
    policy_name:
        description: The name used to identify the policy within the scope of the identity it's attached to.
        required: true
    policy:
        description: A properly formatted JSON sending authorization policy. Required when I(state=present).
    state:
        description: Whether to create (or update) or delete the authorization policy on the identity.
        default: present
        choices: [ 'present', 'absent' ]
requirements: [ 'botocore', 'boto3' ]
extends_documentation_fragment:
    - aws
    - ec2
'''

EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.

- name: add sending authorization policy to domain identity
  aws_ses_identity_policy:
    identity: example.com
    policy_name: ExamplePolicy
    policy: "{{ lookup('template', 'policy.json.j2') }}"
    state: present

- name: add sending authorization policy to email identity
  aws_ses_identity_policy:
    identity: example@example.com
    policy_name: ExamplePolicy
    policy: "{{ lookup('template', 'policy.json.j2') }}"
    state: present

- name: add sending authorization policy to identity using ARN
  aws_ses_identity_policy:
    identity: "arn:aws:ses:us-east-1:12345678:identity/example.com"
    policy_name: ExamplePolicy
    policy: "{{ lookup('template', 'policy.json.j2') }}"
    state: present

- name: remove sending authorization policy
  aws_ses_identity_policy:
    identity: example.com
    policy_name: ExamplePolicy
    state: absent
'''

RETURN = '''
policies:
    description: A list of all policies present on the identity after the operation.
    returned: success
    type: list
    sample: [ExamplePolicy]
'''

from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import compare_policies, AWSRetry

import json

try:
    from botocore.exceptions import BotoCoreError, ClientError
except ImportError:
    pass  # caught by imported HAS_BOTO3


def get_identity_policy(connection, module, identity, policy_name):
    try:
        response = connection.get_identity_policies(Identity=identity, PolicyNames=[policy_name], aws_retry=True)
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to retrieve identity policy {policy}'.format(policy=policy_name))
    policies = response['Policies']
    if policy_name in policies:
        return policies[policy_name]
    return None


def create_or_update_identity_policy(connection, module):
    identity = module.params.get('identity')
    policy_name = module.params.get('policy_name')
    required_policy = module.params.get('policy')
    required_policy_dict = json.loads(required_policy)

    changed = False
    policy = get_identity_policy(connection, module, identity, policy_name)
    policy_dict = json.loads(policy) if policy else None
    if compare_policies(policy_dict, required_policy_dict):
        changed = True
        try:
            if not module.check_mode:
                connection.put_identity_policy(Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to put identity policy {policy}'.format(policy=policy_name))

    # Load the list of applied policies to include in the response.
    # In principle we should be able to just return the response, but given
    # eventual consistency behaviours in AWS it's plausible that we could
    # end up with a list that doesn't contain the policy we just added.
    # So out of paranoia check for this case and if we're missing the policy
    # just make sure it's present.
    #
    # As a nice side benefit this also means the return is correct in check mode
    try:
        policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to list identity policies')
    if policy_name is not None and policy_name not in policies_present:
        policies_present = list(policies_present)
        policies_present.append(policy_name)
    module.exit_json(
        changed=changed,
        policies=policies_present,
    )


def delete_identity_policy(connection, module):
    identity = module.params.get('identity')
    policy_name = module.params.get('policy_name')

    changed = False
    try:
        policies_present = connection.list_identity_policies(Identity=identity, aws_retry=True)['PolicyNames']
    except (BotoCoreError, ClientError) as e:
        module.fail_json_aws(e, msg='Failed to list identity policies')
    if policy_name in policies_present:
        try:
            if not module.check_mode:
                connection.delete_identity_policy(Identity=identity, PolicyName=policy_name, aws_retry=True)
        except (BotoCoreError, ClientError) as e:
            module.fail_json_aws(e, msg='Failed to delete identity policy {policy}'.format(policy=policy_name))
        changed = True
        policies_present = list(policies_present)
        policies_present.remove(policy_name)

    module.exit_json(
        changed=changed,
        policies=policies_present,
    )


def main():
    module = AnsibleAWSModule(
        argument_spec={
            'identity': dict(required=True, type='str'),
            'state': dict(default='present', choices=['present', 'absent']),
            'policy_name': dict(required=True, type='str'),
            'policy': dict(type='json', default=None),
        },
        required_if=[['state', 'present', ['policy']]],
        supports_check_mode=True,
    )

    # SES APIs seem to have a much lower throttling threshold than most of the rest of the AWS APIs.
    # Docs say 1 call per second. This shouldn't actually be a big problem for normal usage, but
    # the ansible build runs multiple instances of the test in parallel, which has caused throttling
    # failures, so apply a jittered backoff to SES calls.
    connection = module.client('ses', retry_decorator=AWSRetry.jittered_backoff())

    state = module.params.get("state")

    if state == 'present':
        create_or_update_identity_policy(connection, module)
    else:
        delete_identity_policy(connection, module)


if __name__ == '__main__':
    main()
pedrolegold/uforge-cli
src/uforgecli/commands/subscription/subscription.py
Python
apache-2.0
29,418
0.007614
__author__ = "UShareSoft"

from texttable import Texttable
from ussclicore.argumentParser import ArgumentParser, ArgumentParserError
from ussclicore.cmd import Cmd, CoreGlobal
from uforgecli.utils import org_utils
from ussclicore.utils import printer
from ussclicore.utils import generics_utils
from uforgecli.utils.uforgecli_utils import *
from uforge.objects import uforge
from subscription_admin import Subscription_Admins
from subscription_role import Subscription_Roles
from subscription_format import Subscription_Format
from subscription_os import Subscription_Os
from subscription_quota import Subscription_Quota
from uforgecli.utils import uforgecli_utils
import pyxb
import shlex
import sys


class Subscription_Cmd(Cmd, CoreGlobal):
    """Manage subscription profiles: list profiles, create profiles, update profiles"""

    cmd_name = "subscription"

    def __init__(self):
        self.subCmds = {}
        self.generate_sub_commands()
        super(Subscription_Cmd, self).__init__()

    def generate_sub_commands(self):
        subscriptionRoles = Subscription_Roles()
        self.subCmds[subscriptionRoles.cmd_name] = subscriptionRoles
        subscriptionAdmins = Subscription_Admins()
        self.subCmds[subscriptionAdmins.cmd_name] = subscriptionAdmins
        subscriptionFormat = Subscription_Format()
        self.subCmds[subscriptionFormat.cmd_name] = subscriptionFormat
        subscriptionOs = Subscription_Os()
        self.subCmds[subscriptionOs.cmd_name] = subscriptionOs
        subscriptionQuota = Subscription_Quota()
        self.subCmds[subscriptionQuota.cmd_name] = subscriptionQuota

    def arg_list(self):
        doParser = ArgumentParser(prog=self.cmd_name + " list", add_help=True,
                                  description="List all the subscription profiles for a given organization. If no organization is provided, the default organization is used.")
        optional = doParser.add_argument_group("optional arguments")
        optional.add_argument('--org', dest='org', required=False,
                              help="The organization name. If no organization is provided, then the default organization is used.")
        return doParser

    def do_list(self, args):
        try:
            doParser = self.arg_list()
            doArgs = doParser.parse_args(shlex.split(args))
            org = org_utils.org_get(self.api, doArgs.org)
            # call UForge API
            printer.out("Getting all the subscription profiles for organization ...")
            subscriptions = self.api.Orgs(org.dbId).Subscriptions().Getall(Search=None)
            subscriptions = generics_utils.order_list_object_by(subscriptions.subscriptionProfiles.subscriptionProfile, "name")
            if subscriptions is None or len(subscriptions) == 0:
                printer.out("There are no subscriptions in [" + org.name + "] ")
                return 0
            printer.out("List of subscription profiles in [" + org.name + "] :")
            table = Texttable(200)
            table.set_cols_align(["c", "c", "c", "c"])
            table.header(["Name", "Code", "Active", "Description"])
            for subscription in subscriptions:
                if subscription.active:
                    active = "X"
                else:
                    active = ""
                table.add_row([subscription.name, subscription.code, active, subscription.description])
            print table.draw() + "\n"
            printer.out("Found " + str(len(subscriptions)) + " subscription profile(s).")
            return 0
        except ArgumentParserError as e:
            printer.out("ERROR: In Arguments: " + str(e), printer.ERROR)
            self.help_list()
        except Exception as e:
            return handle_uforge_exception(e)

    def help_list(self):
        doParser = self.arg_list()
        doParser.print_help()

    def arg_info(self):
        doParser = ArgumentParser(prog=self.cmd_name + " info", add_help=True,
                                  description="Get detailed information on a subscription profile within an organization.")
        mandatory = doParser.add_argument_group("mandatory arguments")
        optional = doParser.add_argument_group("optional arguments")
        mandatory.add_argument('--name', dest='name', required=True,
                               help="The name of the subscription profile")
        optional.add_argument('--org', dest='org', required=False,
                              help="The organization name. If no organization is provided, then the default organization is used.")
        return doParser

    def do_info(self, args):
        try:
            # add arguments
            doParser = self.arg_info()
            doArgs = doParser.parse_args(shlex.split(args))
            # call UForge API
            printer.out("Getting subscription profile with name [" + doArgs.name + "]...")
            org = org_utils.org_get(self.api, doArgs.org)
            subscriptions = self.api.Orgs(org.dbId).Subscriptions().Getall(Search=None)
            printer.out("Subscription profile for [" + doArgs.name + "] :")
            subscription = subscriptions.subscriptionProfiles.subscriptionProfile
            exist = False
            for item in subscription:
                if item.name == doArgs.name:
                    exist = True
                    subscription = item
            if not exist:
                printer.out("The requested subscription profile doesn't exist in [" + org.name + "]")
                return 0
            table = Texttable(200)
            table.set_cols_align(["l", "l"])
            table.header(["Info", "Value"])
            table.add_row(["Name", subscription.name])
            table.add_row(["Code", subscription.code])
            if subscription.active:
                active = "X"
            else:
                active = ""
            table.add_row(["Active", active])
            if subscription.roles.role:
                nb = len(subscription.roles.role)
                table.add_row(["Roles", str(nb)])
            else:
                table.add_row(["Roles", "None"])
            if subscription.admins.admin:
                nbAdmin = len(subscription.admins.admin)
                table.add_row(["Administrators", str(nbAdmin)])
            else:
                table.add_row(["Administrators", "None"])
            if subscription.distributions.distribution:
                nbDist = len(subscription.distributions.distribution)
                table.add_row(["Operating Systems", str(nbDist)])
            else:
                table.add_row(["Operating Systems", "None"])
            if subscription.formats.format:
                nbFormat = len(subscription.formats.format)
                table.add_row(["Image Formats", str(nbFormat)])
            else:
                table.add_row(["Image Formats", "None"])
            print table.draw() + "\n"
            if subscription.description is not None or subscription.description ==
droundy/deft
papers/histogram/figs/gamma-multi-plot.py
Python
gpl-2.0
2,481
0.009674
from __future__ import division

import numpy as np
import matplotlib.pyplot as plt
import sys, glob, matplotlib
import colors

matplotlib.rcParams['text.usetex'] = True
matplotlib.rc('font', family='serif')

filename = sys.argv[1]

plt.figure(figsize=(5, 4))

print('starting!')
try:
    for wl in glob.glob("data/gamma/%s/wl*.txt" % filename):
        print("in wl")
        print('vanilla_wang_landau' + wl[len("data/gamma/%s/wl" % filename):-4])
        wlmoves, wlfactor = np.loadtxt(wl, dtype=float, unpack=True)
        data = np.loadtxt(wl)
        moves = data[:, 0]
        factor = data[:, 1]
        if (data[0, 0] == 'wl_factor'):
            # using c++ data!
            moves = np.zeros(len(wlmoves)*2 + 2)
            factor = np.zeros_like(moves)
            factor[0] = 1
            moves[0] = 1
            for i in range(len(wlmoves)):
                moves[2*i+1] = wlmoves[i]
                moves[2*i+2] = wlmoves[i]
                factor[2*i+1] = wlfactor[i]*2
                factor[2*i+2] = wlfactor[i]
        colors.loglog(moves, factor,
                      'vanilla_wang_landau' + wl[len("data/gamma/%s/wl" % filename):-4])
        plt.ylim(ymin=1e-10, ymax=1e1)
        plt.xlim(xmin=1e0, xmax=2e12)
except:
    pass

print((glob.glob("data/gamma/%s/sad*.dat" % filename)))
try:
    for sad in glob.glob("data/gamma/%s/sad*.dat" % filename):
        data = np.loadtxt(sad)
        ts = data[:, 0]
        avg_gamma = data[:, 1]
        sadname = sad.split('/')[-1].split('.')[0]
        plt.ylim(ymin=1e-10, ymax=1e1)
        plt.xlim(xmin=1e0, xmax=2e12)
        #print(avg_gamma)
        #print(ts)
        colors.loglog(ts, avg_gamma, sadname)
        if data.shape[1] > 2:
            max_avg_gamma = data[:, 2]
            min_avg_gamma = data[:, 3]
            plt.fill_between(ts, min_avg_gamma, max_avg_gamma,
                             edgecolor='none', linewidth=0,
                             color=colors.color('sad'),
                             alpha=0.1, zorder=-51)
except:
    raise

def gamma_sa(t, t0):
    return t0/np.maximum(t, t0)

t0s = ['1e3', '1e4', '1e5', '1e6', '1e7']
for t0 in t0s:
    colors.loglog(ts, gamma_sa(ts, float(t0)),
                  'samc-%s-%s' % (t0, filename.replace('n', '')))

plt.xlabel(r'$\textrm{Moves}$')
plt.ylabel(r'$\gamma_{t}$')
colors.legend()
plt.tight_layout()
plt.savefig('figs/gamma-%s.pdf' % filename.replace('.', '_'))
if 'noshow' not in sys.argv:
    plt.show()
kernelci/kernelci-backend
app/utils/build/__init__.py
Python
lgpl-2.1
12,705
0
# Copyright (C) Collabora Limited 2017,2019
# Author: Guillaume Tucker <guillaume.tucker@collabora.com>
# Author: dcz-collabora <dorota.czaplejewicz@collabora.co.uk>
#
# Copyright (C) Linaro Limited 2015,2016,2017,2018,2019
# Author: Matt Hart <matthew.hart@linaro.org>
# Author: Milo Casagrande <milo.casagrande@linaro.org>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

"""Functions to import builds/defconfigs."""

try:
    import simplejson as json
except ImportError:
    import json

try:
    from os import walk
except ImportError:
    from scandir import walk

import bson
import datetime
import io
import os
import pymongo.errors
import re
import redis
import types

import models
import models.build as mbuild
import models.job as mjob
import utils
import utils.database.redisdb as redisdb
import utils.db
import utils.errors

ERR_ADD = utils.errors.add_error
ERR_UPDATE = utils.errors.update_errors

# Regex to extract the kernel version.
# Should match strings that begin as:
# 4.1-1234-g12345
# 4.1.14-rc8-1234-g12345
# The 'rc*' pattern is part of the kernel version.
# TODO: add patches count extraction as well.
KERNEL_VERSION_MATCH = re.compile(r"^(?P<version>\d+\.{1}\d+(?:\.{1}\d+)?)")
KERNEL_RC_VERSION_MATCH = re.compile(
    r"^(?P<version>\d+\.{1}\d+(?:\.{1}\d+)?-{1}rc\d*)")


def _search_prev_build_doc(build_doc, database):
    """Search for a similar defconfig document in the database.

    Search for an already imported defconfig/build document in the database
    and return its object ID and creation date. This is done to make sure
    we do not create double documents when re-importing the same data or
    updating it.

    :param build_doc: The new defconfig document.
    :param database: The db connection.
    :return The previous doc ID and its creation date, or None.
    """
    doc_id = None
    c_date = None

    if build_doc and database:
        spec = {
            models.ARCHITECTURE_KEY: build_doc.arch,
            models.DEFCONFIG_FULL_KEY: build_doc.defconfig_full,
            models.DEFCONFIG_KEY: build_doc.defconfig,
            models.GIT_BRANCH_KEY: build_doc.git_branch,
            models.JOB_KEY: build_doc.job,
            models.KERNEL_KEY: build_doc.kernel,
            models.BUILD_ENVIRONMENT_KEY: build_doc.build_environment
        }
        collection = database[models.BUILD_COLLECTION]
        prev_doc_count = collection.count_documents(spec, limit=2)

        if prev_doc_count > 0:
            if prev_doc_count == 1:
                prev_doc = utils.db.find_one2(collection, spec)
                doc_id = prev_doc.get(models.ID_KEY)
                c_date = prev_doc.get(models.CREATED_KEY)
            else:
                utils.LOG.warn(
                    "Found multiple defconfig docs matching: {}".format(spec))
                utils.LOG.error(
                    "Cannot keep old document ID, don't know which one to "
                    "use!")

    return doc_id, c_date


class BuildError(Exception):
    def __init__(self, code, *args, **kwargs):
        self.code = code
        self.from_exc = kwargs.pop('from_exc', None)
        super(BuildError, self).__init__(*args, **kwargs)


def _update_job_doc(job_doc, job_id, status, build_doc, database):
    """Update the JobDocument with values from a BuildDocument.

    :param job_doc: The job document to update.
    :type job_doc: JobDocument
    :param status: The job status value.
    :type status: string
    :param build_doc: A BuildDocument object.
    :type build_doc: BuildDocument
    """
    to_update = False
    ret_val = 201

    if (job_id and job_doc.id != job_id):
        job_doc.id = job_id
        to_update = True

    if job_doc.status != status:
        job_doc.status = status
        to_update = True

    no_git = all([
        not job_doc.git_url,
        not job_doc.git_commit,
        not job_doc.git_describe,
        not job_doc.git_describe_v
    ])

    no_compiler = all([
        not job_doc.compiler,
        not job_doc.compiler_version,
        not job_doc.compiler_version_ext,
        not job_doc.compiler_version_full,
        not job_doc.cross_compile
    ])

    if (build_doc and no_git and no_compiler):
        # Kind of a hack:
        # We want to store some metadata at the job document level as well,
        # like git tree, git commit...
        # Since, at the moment, we do not have the metadata file at the job
        # level we need to pick one from the build documents, and extract some
        # values.
        if isinstance(build_doc, mbuild.BuildDocument):
            if (build_doc.job == job_doc.job and
                    build_doc.kernel == job_doc.kernel):
                job_doc.git_commit = build_doc.git_commit
                job_doc.git_describe = build_doc.git_describe
                job_doc.git_describe_v = build_doc.git_describe_v
                job_doc.kernel_version = build_doc.kernel_version
                job_doc.git_url = build_doc.git_url
                job_doc.compiler = build_doc.compiler
                job_doc.compiler_version = build_doc.compiler_version
                job_doc.compiler_version_ext = build_doc.compiler_version_ext
                job_doc.compiler_version_full = build_doc.compiler_version_full
                job_doc.cross_compile = build_doc.cross_compile
                to_update = True

    if to_update:
        ret_val, _ = utils.db.save(database, job_doc)
    return ret_val


def _get_or_create_job(meta, database, db_options):
    """Get or create a job in the database.

    :param job: The name of the job.
    :type job: str
    :param kernel: The name of the kernel.
    :type kernel: str
    :param database: The mongodb database connection.
    :param db_options: The database connection options.
    :type db_options: dict
    :return a 3-tuple: return value, job document and job ID.
    """
    ret_val = 201
    job_doc = None
    job_id = None

    rev = meta["bmeta"]["revision"]
    tree, descr, branch = (rev[key] for key in ["tree", "describe", "branch"])
    redis_conn = redisdb.get_db_connection(db_options)

    # We might be importing builds in parallel through multi-processes. Keep a
    # lock here when looking for a job or we might end up with multiple job
    # creations.
    # ToDo: rename Job as Revision since that's what it really is
    lock_key = "build-import-{}-{}-{}".format(tree, descr, branch)
    with redis.lock.Lock(redis_conn, lock_key, timeout=5):
        p_doc = utils.db.find_one2(
            database[models.JOB_COLLECTION],
            {
                models.JOB_KEY: tree,
                models.KERNEL_KEY: descr,
                models.GIT_BRANCH_KEY: branch,
            })

        if p_doc:
            job_doc = mjob.JobDocument.from_json(p_doc)
            job_id = job_doc.id
        else:
            job_doc = mjob.JobDocument(tree, descr, branch)
            job_doc.status = models.BUILD_STATUS
            job_doc.created_on = datetime.datetime.now(tz=bson.tz_util.utc)
            ret_val, job_id = utils.db.save(database, job_doc)
            job_doc.id = job_id

    return ret_val, job_doc, job_id


def _get_build(meta, database):
    """Make a BuildDocument object and return it"""
    bmeta, steps, artifacts = (meta[key] for key in [
        "bmeta", "steps", "artifacts"
    ])
    env, kernel, rev, build = (bmeta[key] for key in [
        "environment", "kernel", "revision", "build"
    ])
    doc = mbuild.BuildDocument(
        rev["tree"],
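The two version regexes near the top of this file can be exercised directly; a quick check against one of the sample strings given in the comment:

import re

KERNEL_RC_VERSION_MATCH = re.compile(
    r"^(?P<version>\d+\.{1}\d+(?:\.{1}\d+)?-{1}rc\d*)")

m = KERNEL_RC_VERSION_MATCH.match("4.1.14-rc8-1234-g12345")
print(m.group("version"))  # -> 4.1.14-rc8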
dridk/antibiobank
antibiobank/wsgi.py
Python
gpl-3.0
397
0.002519
""" WSGI confi
g for antibiobank project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/ """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "antibiobank.settings") from django.core.wsgi import get
_wsgi_application application = get_wsgi_application()
tinkerinestudio/Tinkerine-Suite
TinkerineSuite/python/Lib/power/linux.py
Python
agpl-3.0
8,180
0.002445
# coding=utf-8
"""
Implements PowerManagement functions using /sys/class/power_supply/*
See doc/linux for platform-specific details.
"""
__author__ = 'kulakov.ilya@gmail.com'

import os
import warnings

from power import common


POWER_SUPPLY_PATH = '/sys/class/power_supply'


if not os.access(POWER_SUPPLY_PATH, os.R_OK):
    raise RuntimeError("Unable to read {path}.".format(path=POWER_SUPPLY_PATH))


class PowerManagement(common.PowerManagementBase):
    @staticmethod
    def power_source_type(supply_path):
        """
        @param supply_path: Path to power supply
        @return: One of common.POWER_TYPE_*
        @raise: Runtime error if type of power source is not supported
        """
        with open(os.path.join(supply_path, 'type'), 'r') as type_file:
            type = type_file.readline().strip()
            if type == 'Mains':
                return common.POWER_TYPE_AC
            elif type == 'UPS':
                return common.POWER_TYPE_UPS
            elif type == 'Battery':
                return common.POWER_TYPE_BATTERY
            else:
                raise RuntimeError("Type of {path} ({type}) is not supported".format(path=supply_path, type=type))

    @staticmethod
    def is_ac_online(supply_path):
        """
        @param supply_path: Path to power supply
        @return: True if ac is online. Otherwise False
        """
        with open(os.path.join(supply_path, 'online'), 'r') as online_file:
            return online_file.readline().strip() == '1'

    @staticmethod
    def is_battery_present(supply_path):
        """
        @param supply_path: Path to power supply
        @return: True if battery is present. Otherwise False
        """
        with open(os.path.join(supply_path, 'present'), 'r') as present_file:
            return present_file.readline().strip() == '1'

    @staticmethod
    def is_battery_discharging(supply_path):
        """
        @param supply_path: Path to power supply
        @return: True if battery is discharging. Otherwise False
        """
        with open(os.path.join(supply_path, 'status'), 'r') as status_file:
            return status_file.readline().strip() == 'Discharging'

    @staticmethod
    def get_battery_state(supply_path):
        """
        @param supply_path: Path to power supply
        @return: Tuple (energy_full, energy_now, power_now)
        """
        with open(os.path.join(supply_path, 'energy_now'), 'r') as energy_now_file:
            with open(os.path.join(supply_path, 'power_now'), 'r') as power_now_file:
                with open(os.path.join(supply_path, 'energy_full'), 'r') as energy_full_file:
                    energy_now = float(energy_now_file.readline().strip())
                    power_now = float(power_now_file.readline().strip())
                    energy_full = float(energy_full_file.readline().strip())
                    return energy_full, energy_now, power_now

    def get_providing_power_source_type(self):
        """
        Looks through all power supplies in POWER_SUPPLY_PATH.
        If there is an AC adapter online, returns POWER_TYPE_AC.
        If there is a discharging battery, returns POWER_TYPE_BATTERY.
        Since the order of supplies is arbitrary, whatever is found first is returned.
        """
        for supply in os.listdir(POWER_SUPPLY_PATH):
            supply_path = os.path.join(POWER_SUPPLY_PATH, supply)
            try:
                type = self.power_source_type(supply_path)
                if type == common.POWER_TYPE_AC:
                    if self.is_ac_online(supply_path):
                        return common.POWER_TYPE_AC
                elif type == common.POWER_TYPE_BATTERY:
                    if self.is_battery_present(supply_path) and self.is_battery_discharging(supply_path):
                        return common.POWER_TYPE_BATTERY
                else:
                    warnings.warn("UPS is not supported.")
            except (RuntimeError, IOError) as e:
                warnings.warn("Unable to read properties of {path}: {error}".format(path=supply_path, error=str(e)))
        return common.POWER_TYPE_AC

    def get_low_battery_warning_level(self):
        """
        Looks through all power supplies in POWER_SUPPLY_PATH.
        If there is an AC adapter online, returns LOW_BATTERY_WARNING_NONE.
        Otherwise determines total percentage and time remaining across all
        attached batteries.
        """
        all_energy_full = []
        all_energy_now = []
        all_power_now = []
        for supply in os.listdir(POWER_SUPPLY_PATH):
            supply_path = os.path.join(POWER_SUPPLY_PATH, supply)
            try:
                type = self.power_source_type(supply_path)
                if type == common.POWER_TYPE_AC:
                    if self.is_ac_online(supply_path):
                        return common.LOW_BATTERY_WARNING_NONE
                elif type == common.POWER_TYPE_BATTERY:
                    if self.is_battery_present(supply_path) and self.is_battery_discharging(supply_path):
                        energy_full, energy_now, power_now = self.get_battery_state(supply_path)
                        all_energy_full.append(energy_full)
                        all_energy_now.append(energy_now)
                        all_power_now.append(power_now)
                else:
                    warnings.warn("UPS is not supported.")
            except (RuntimeError, IOError) as e:
                warnings.warn("Unable to read properties of {path}: {error}".format(path=supply_path, error=str(e)))

        try:
            # percentage of charge remaining across all batteries
            total_percentage = sum(all_energy_now) / sum(all_energy_full) * 100.0
            # minutes left across all batteries
            total_time = sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
            if total_time <= 10.0:
                return common.LOW_BATTERY_WARNING_FINAL
            elif total_percentage <= 22.0:
                return common.LOW_BATTERY_WARNING_EARLY
            else:
                return common.LOW_BATTERY_WARNING_NONE
        except ZeroDivisionError as e:
            warnings.warn("Unable to calculate low battery level: {error}".format(error=str(e)))
            return common.LOW_BATTERY_WARNING_NONE

    def get_time_remaining_estimate(self):
        """
        Looks through all power sources and returns total time remaining estimate
        or TIME_REMAINING_UNLIMITED if an AC power supply is online.
        """
        all_energy_now = []
        all_power_now = []
        for supply in os.listdir(POWER_SUPPLY_PATH):
            supply_path = os.path.join(POWER_SUPPLY_PATH, supply)
            try:
                type = self.power_source_type(supply_path)
                if type == common.POWER_TYPE_AC:
                    if self.is_ac_online(supply_path):
                        return common.TIME_REMAINING_UNLIMITED
                elif type == common.POWER_TYPE_BATTERY:
                    if self.is_battery_present(supply_path) and self.is_battery_discharging(supply_path):
                        energy_full, energy_now, power_now = self.get_battery_state(supply_path)
                        all_energy_now.append(energy_now)
                        all_power_now.append(power_now)
                else:
                    warnings.warn("UPS is not supported.")
            except (RuntimeError, IOError) as e:
                warnings.warn("Unable to read properties of {path}: {error}".format(path=supply_path, error=str(e)))

        if len(all_energy_now) > 0:
            try:
                return sum([energy_now / power_now * 60.0 for energy_now, power_now in zip(all_energy_now, all_power_now)])
            except ZeroDivisionError as e:
                warnings.warn("Unable to calculate time remaining estimate: {error}".format(error=str(e)))
                return common.TIME_REMAINING_UNKNOWN
        else:
            return common.TIME_REMAINING_UNKNOWN

    def add_observer(self, observer):
        warnings.warn("Current system does not support observing.")
        pass

    def remove_observer(self, observer):
        warnings.warn("Current system does not support observing.")
        pass
trea-uy/djangocms-plugin-image-gallery
image_gallerys/cms_plugins.py
Python
apache-2.0
1,027
0.000974
# coding: utf-8
import re

from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .models import *
from django.utils.translation import ugettext as _
from django.contrib import admin
from django.forms import ModelForm, ValidationError


class GalleryForm(ModelForm):
    class Meta:
        model = Gallery

    def clean_domid(self):
        data = self.cleaned_data['domid']
        if not re.match(r'^[a-zA-Z_]\w*$', data):
            raise ValidationError(
                _("The name must be a single word beginning with a letter"))
        return data


class GalleryItemInline(admin.TabularInline):
    model = GalleryItem


class GalleryPlugin(CMSPluginBase):
    model = Gallery
    form = GalleryForm
    name = _("Gallery")
    render_template = "gallery.html"
    inlines = [
        GalleryItemInline,
    ]

    def render(self, context, instance, placeholder):
        context.update({'instance': instance})
        return context


plugin_pool.register_plugin(GalleryPlugin)
unnikrishnankgs/va
venv/lib/python3.5/site-packages/tensorflow/models/object_detection/core/losses.py
Python
bsd-2-clause
23,854
0.002893
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Classification and regression loss functions for object detection.

Localization losses:
 * WeightedL2LocalizationLoss
 * WeightedSmoothL1LocalizationLoss
 * WeightedIOULocalizationLoss

Classification losses:
 * WeightedSigmoidClassificationLoss
 * WeightedSoftmaxClassificationLoss
 * BootstrappedSigmoidClassificationLoss
"""
from abc import ABCMeta
from abc import abstractmethod

import tensorflow as tf

from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.utils import ops

slim = tf.contrib.slim


class Loss(object):
  """Abstract base class for loss functions."""
  __metaclass__ = ABCMeta

  def __call__(self,
               prediction_tensor,
               target_tensor,
               ignore_nan_targets=False,
               scope=None,
               **params):
    """Call the loss function.

    Args:
      prediction_tensor: a tensor representing predicted quantities.
      target_tensor: a tensor representing regression or classification
        targets.
      ignore_nan_targets: whether to ignore nan targets in the loss
        computation. E.g. can be used if the target tensor is missing
        groundtruth data that shouldn't be factored into the loss.
      scope: Op scope name. Defaults to 'Loss' if None.
      **params: Additional keyword arguments for specific implementations of
        the Loss.

    Returns:
      loss: a tensor representing the value of the loss function.
    """
    with tf.name_scope(scope, 'Loss',
                       [prediction_tensor, target_tensor, params]) as scope:
      if ignore_nan_targets:
        target_tensor = tf.where(tf.is_nan(target_tensor),
                                 prediction_tensor,
                                 target_tensor)
      return self._compute_loss(prediction_tensor, target_tensor, **params)

  @abstractmethod
  def _compute_loss(self, prediction_tensor, target_tensor, **params):
    """Method to be overriden by implementations.

    Args:
      prediction_tensor: a tensor representing predicted quantities
      target_tensor: a tensor representing regression or classification targets
      **params: Additional keyword arguments for specific implementations of
        the Loss.

    Returns:
      loss: a tensor representing the value of the loss function
    """
    pass


class WeightedL2LocalizationLoss(Loss):
  """L2 localization loss function with anchorwise output support.

  Loss[b,a] = .5 * ||weights[b,a] * (prediction[b,a,:] - target[b,a,:])||^2
  """

  def __init__(self, anchorwise_output=False):
    """Constructor.

    Args:
      anchorwise_output: Outputs loss per anchor. (default False)
    """
    self._anchorwise_output = anchorwise_output

  def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a (scalar) tensor representing the value of the loss function
        or a float tensor of shape [batch_size, num_anchors]
    """
    weighted_diff = (prediction_tensor - target_tensor) * tf.expand_dims(
        weights, 2)
    square_diff = 0.5 * tf.square(weighted_diff)
    if self._anchorwise_output:
      return tf.reduce_sum(square_diff, 2)
    return tf.reduce_sum(square_diff)


class WeightedSmoothL1LocalizationLoss(Loss):
  """Smooth L1 localization loss function.

  The smooth L1_loss is defined elementwise as .5 x^2 if |x|<1 and |x|-.5
  otherwise, where x is the difference between predictions and target.

  See also Equation (3) in the Fast R-CNN paper by Ross Girshick (ICCV 2015)
  """

  def __init__(self, anchorwise_output=False):
    """Constructor.

    Args:
      anchorwise_output: Outputs loss per anchor. (default False)
    """
    self._anchorwise_output = anchorwise_output

  def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a (scalar) tensor representing the value of the loss function
    """
    diff = prediction_tensor - target_tensor
    abs_diff = tf.abs(diff)
    abs_diff_lt_1 = tf.less(abs_diff, 1)
    anchorwise_smooth_l1norm = tf.reduce_sum(
        tf.where(abs_diff_lt_1, 0.5 * tf.square(abs_diff), abs_diff - 0.5),
        2) * weights
    if self._anchorwise_output:
      return anchorwise_smooth_l1norm
    return tf.reduce_sum(anchorwise_smooth_l1norm)


class WeightedIOULocalizationLoss(Loss):
  """IOU localization loss function.

  Sums the IOU for corresponding pairs of predicted/groundtruth boxes
  and for each pair assign a loss of 1 - IOU.  We then compute a weighted
  sum over all pairs which is returned as the total loss.
  """

  def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors, 4]
        representing the decoded predicted boxes
      target_tensor: A float tensor of shape [batch_size, num_anchors, 4]
        representing the decoded target boxes
      weights: a float tensor of shape [batch_size, num_anchors]

    Returns:
      loss: a (scalar) tensor representing the value of the loss function
    """
    predicted_boxes = box_list.BoxList(tf.reshape(prediction_tensor, [-1, 4]))
    target_boxes = box_list.BoxList(tf.reshape(target_tensor, [-1, 4]))
    per_anchor_iou_loss = 1.0 - box_list_ops.matched_iou(predicted_boxes,
                                                         target_boxes)
    return tf.reduce_sum(tf.reshape(weights, [-1]) * per_anchor_iou_loss)


class WeightedSigmoidClassificationLoss(Loss):
  """Sigmoid cross entropy classification loss function."""

  def __init__(self, anchorwise_output=False):
    """Constructor.

    Args:
      anchorwise_output: Outputs loss per anchor. (default False)
    """
    self._anchorwise_output = anchorwise_output

  def _compute_loss(self,
                    prediction_tensor,
                    target_tensor,
                    weights,
                    class_indices=None):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
      weights: a float tensor of shape [batch_size, num_anchors]
      class_indices: (Optional) A 1-D integer tensor of class indices.
        If provided, computes loss only for the specified class indices.

    Returns:
      loss: a (scalar) tensor representing the value of the loss function
        or a float tensor of shape [batch_size, num_anchors]
    """
    weights = tf.expand_dims(weights, 2)
    if cl
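A minimal numeric check of the smooth-L1 definition quoted in the WeightedSmoothL1LocalizationLoss docstring, written against the same TF 1.x API this file uses:

import tensorflow as tf  # assumes TF 1.x, as in the file above

x = tf.constant([0.5, 2.0])
smooth_l1 = tf.where(tf.less(tf.abs(x), 1), 0.5 * tf.square(x), tf.abs(x) - 0.5)
with tf.Session() as sess:
    print(sess.run(smooth_l1))  # -> [0.125 1.5]: .5*x^2 below 1, |x|-.5 above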
nzbget/nzbget
tests/functional/parcheck/parcheck_opt2_test.py
Python
gpl-2.0
1,204
0.017442
nzbget_options = ['ParCheck=manual', 'ParQuick=yes', 'PostStrategy=sequential']


def test_parchecker_healthy(nserv, nzbget):
    hist = nzbget.download_nzb('parchecker2.nzb')
    assert hist['Status'] == 'SUCCESS/HEALTH'


def test_parchecker_repair(nserv, nzbget):
    nzb_content = nzbget.load_nzb('parchecker2.nzb')
    nzb_content = nzb_content.replace('parchecker2/testfile.7z.001?16=45000:3000',
                                      'parchecker2/testfile.7z.001?16=45000:3000!0')
    hist = nzbget.download_nzb('parchecker.repair.nzb', nzb_content)
    assert hist['Status'] == 'WARNING/DAMAGED'


def test_parchecker_middle(nserv, nzbget):
    nzb_content = nzbget.load_nzb('parchecker2.nzb')
    nzb_content = nzb_content.replace('<segment bytes="3000" number="16">parchecker2/testfile.7z.001?16=45000:3000</segment>', '')
    hist = nzbget.download_nzb('parchecker.middle.nzb', nzb_content)
    assert hist['Status'] == 'WARNING/DAMAGED'


def test_parchecker_last(nserv, nzbget):
    nzb_content = nzbget.load_nzb('parchecker2.nzb')
    nzb_content = nzb_content.replace('<segment bytes="3000" number="18">parchecker2/testfile.7z.001?18=51000:200</segment>', '')
    hist = nzbget.download_nzb('parchecker.last.nzb', nzb_content)
    assert hist['Status'] == 'SUCCESS/HEALTH'
moosd/HomeAutomation
main.py
Python
gpl-2.0
1,574
0.014612
#!/usr/bin/env python
import time
import os
from plugins import *
import CustomHandlers

# Light switch
#try:
#    CapSwitch.CapSwitch(SerialEnumeration.find("capbutton1"))
#except:
#    pass

try:
    # Telephony services
    rotdial = RotaryDial.RotaryDial(SerialEnumeration.find("rotarydial"))
    # Handle telephony requests and connect calls
    CustomHandlers.RotaryHandler(rotdial.ser)
except:
    pass

# Interface with a light production mechanism
print(" * Connecting lightbulbs...")
test = Lightbulb.connect(1, 71, 151, Lightbulb.GattQueue("44:A6:E5:03:27:F9", "hci0"))
test2 = Lightbulb.connect(2, 89, 119, Lightbulb.GattQueue("44:A6:E5:03:27:DF", "hci1"))
print(" * Lightbulbs connected!")

# Various telemetry and electromagnetic relay control
#srb = SensorRelayBoard.SensorRelayBoard(SerialEnumeration.find("sensorrelay"))
#SensorRelayBoard.Relay(3, 1, srb.ser)

# Allow control over a Highly Terrific but Troublesome Protocol
HTTP_API.HTTP_API()

# Test temporal quantification mechanism
#CustomHandlers.Timer().start()

#test.setStatus(0)
#test2.setStatus(0)
#time.sleep(2)
#test.setParameter("color", 1)
#test.setParameter("temp", 144)
#time.sleep(5)
#test.setStatus(1)
#test2.setStatus(1)
#print test.getParameters()

#b = 100
#while b > 1:
#    test.setParameter("brightness", b)
#    b = b-1
#    time.sleep(0.1)

#test.setParameter("color", 1)
#test.setParameter("brightness", 100)
#TestHandler()
#TestSensor()

while True:
    try:
        time.sleep(1)
    except KeyboardInterrupt:
        print '^C received, shutting down'
        os._exit(0)
jni/useful-histories
maree-faux-data.py
Python
bsd-3-clause
2,032
0.004429
# IPython log file

with open('request.txt', mode='r') as fin:
    wells = [line.strip() for line in fin]
wells
plates = ['_'.join(well.split('_')[:2]) for well in wells]
plates
set.intersection(plates, os.listdir('/Volumes/King-Ecad-Screen-Tiffs2/tiffs/'))
set.intersection(set(plates), set(os.listdir('/Volumes/King-Ecad-Screen-Tiffs2/tiffs/')))
hdd = '/Volumes/King-Ecad-Screen-Tiffs2/tiffs/'
avail = list(set.intersection(set(plates), set(os.listdir(hdd))))
from glob import glob
import shutil
get_ipython().run_line_magic('pinfo', 'shutil.copyfile')
len(plates) == len(set(plates))
for well in wells:
    plate = '_'.join(well.split('_')[:2])
    if plate in avail:
        files = sorted(glob(os.path.join(hdd, plate, well + '*')))
        for file in files:
            shutil.copyfile(file, '.')
            print(f'copied: {os.path.basename(file)}')
for well in wells:
    plate = '_'.join(well.split('_')[:2])
    if plate in avail:
        files = sorted(glob(os.path.join(hdd, plate, well + '*')))
        for file in files:
            basename = os.path.basename(file)
            shutil.copyfile(file, os.path.join('.', basename))
            print(f'copied: {basename}')
hdd = '/Volumes/King-Ecad-Screen-Tiffs/tiff/
hdd = '/Volumes/King-Ecad-Screen-Tiffs/tiff/'
avail2 = list(set.intersection(set(plates), set(os.listdir(hdd))))
set.difference(set(plates), set(avail2).union(set(avail)))
not_avail = _18
for well in wells:
    plate = '_'.join(well.split('_')[:2])
    if plate in avail2:
        files = sorted(glob(os.path.join(hdd, plate, well + '*')))
        for file in files:
            basename = os.path.basename(file)
            shutil.copyfile(file, os.path.join('.', basename))
            print(f'copied: {basename}')
for well in wells:
    plate = '_'.join(well.split('_')[:2])
    if plate in not_avail:
        print(well)
for well in wells:
    plate = '_'.join(well.split('_')[:2])
    if plate not in not_avail:
        print(well)
marlboromoo/basinboa
basinboa/user/role.py
Python
mit
87
0.011494
#!/usr/bin/env python

""" role of player """

ROLE_ADMIN = 'admin'
ROLE_USER = 'user'
FJFranklin/wifi-py-rpi-car-controller
noise/Noise/Visible.py
Python
mit
4,546
0.007259
import numpy as np

from .Basis import Basis

class Visible(object):

    def __init__(self, origin, window, target=None):
        self.origin = origin
        self.window = window
        self.target = target

    def crop_visible(self, visible):
        cropped_poly = self.window.crop_2D_poly(visible.window)

        if cropped_poly is None:  # cropped away, or lost amidst the tolerances...
            return None

        # project polygon back onto original plane
        cropped_proj = visible.target.project_3D_poly(self.origin, cropped_poly)

        if cropped_proj is None:
            print('Projection error')
            return None

        return Visible(self.origin, cropped_poly, cropped_proj)

    def nearest_intersection(self, visible):
        # assumes coincident origins & coplanar windows
        # i.e., self.origin == visible.origin & self.window.plane == visible.window.plane
        v3D, count = visible.target.vertices()

        v1 = None
        v2 = None
        d1 = 0
        d2 = 0

        for i1 in range(0, count):
            i2 = i1 + 1
            if i2 == count:
                i2 = 0

            Bi = visible.window.verts[i2,:] - visible.window.verts[i1,:]
            Bj = np.asarray([-Bi[1], Bi[0]])

            vertex_above = False
            vertex_below = False

            for s in range(0, self.window.count):
                dp = np.dot(self.window.verts[s,:] - visible.window.verts[i1,:], Bj)
                if Basis.is_strictly_positive(dp):
                    vertex_above = True
                if Basis.is_strictly_negative(dp):
                    vertex_below = True

            if vertex_above and vertex_below:
                # a valid intersecting edge
                dist_o1 = np.linalg.norm(v3D[i1,:] - self.origin)
                dist_o2 = np.linalg.norm(v3D[i2,:] - self.origin)

                if dist_o1 < dist_o2:
                    v_min = visible.window.verts[i1,:]
                    v_max = visible.window.verts[i2,:]
                    d_min = dist_o1
                    d_max = dist_o2
                else:
                    v_min = visible.window.verts[i2,:]
                    v_max = visible.window.verts[i1,:]
                    d_min = dist_o2
                    d_max = dist_o1

                if (v1 is None) or ((d_min < d1) or ((d_min == d1) and (d_max < d2))):
                    v1 = v_min
                    v2 = v_max
                    d1 = d_min
                    d2 = d_max
                    break

        return v1, d1, v2, d2

    def compare_visible(self, visible, printing=False):
        # assumes coincident origins & coplanar windows
        # i.e., self.origin == visible.origin & self.window.plane == visible.window.plane

        # is_exterior is True if all points in visible.window are outside self.window
        # is_interior is True if all points in visible.window are within self.window
        # is_farther is True if visible.target lies on the opposite side of
        # self.target.plane from the origin
        is_exterior = False
        is_interior = True
        is_farther = True
        is_coplanar = True

        for s1 in range(0, self.window.count):
            s2 = s1 + 1
            if s2 == self.window.count:
                s2 = 0

            Bi = self.window.verts[s2,:] - self.window.verts[s1,:]
            Bj = np.asarray([-Bi[1], Bi[0]])  # points inwards; not a normalised basis vector

            all_outside = True
            for i in range(0, visible.window.count):
                dp = np.dot(visible.window.verts[i,:] - self.window.verts[s1,:], Bj)
                if Basis.is_strictly_positive(dp):
                    all_outside = False
                if Basis.is_strictly_negative(dp):
                    is_interior = False
            if all_outside:
                is_exterior = True
                is_interior = False
                break

        v3D, count = visible.target.vertices()
        xy_o, z_o = self.target.plane.project(self.origin)
        for i in range(0, count):
            xy_i, z_i = self.target.plane.project(v3D[i,:])
            zoi = z_o * z_i
            if printing:
                print('vis-cmp: ' + str((z_o, z_i, zoi)))
            if Basis.is_strictly_positive(zoi):
                is_farther = False
                is_coplanar = False
            if Basis.is_strictly_negative(zoi):
                is_coplanar = False

        return is_exterior, is_interior, is_farther, is_coplanar
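The recurring np.asarray([-Bi[1], Bi[0]]) construction in this class is the usual 90-degree rotation that turns a 2D edge direction into an edge normal for the half-plane tests; a two-line check:

import numpy as np

Bi = np.array([3.0, 1.0])          # some edge direction
Bj = np.asarray([-Bi[1], Bi[0]])   # rotated 90 degrees counter-clockwise
print(np.dot(Bi, Bj))              # -> 0.0: Bj is perpendicular to Bi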
jeremiah-c-leary/vhdl-style-guide
vsg/token/protected_type_declaration.py
Python
gpl-3.0
858
0
from vsg import parser


class protected_keyword(parser.keyword):
    '''
    unique_id = protected_type_declaration : protected_keyword
    '''

    def __init__(self, sString):
        parser.keyword.__init__(self, sString)


class end_keyword(parser.keyword):
    '''
    unique_id = protected_type_declaration : end_keyword
    '''

    def __init__(self, sString):
        parser.keyword.__init__(self, sString)


class end_protected_keyword(parser.keyword):
    '''
    unique_id = protected_type_declaration : end_protected_keyword
    '''

    def __init__(self, sString):
        parser.keyword.__init__(self, sString)


class protected_type_simple_name(parser.simple_name):
    '''
    unique_id = protected_type_declaration : protected_type_simple_name
    '''

    def __init__(self, sString):
        parser.simple_name.__init__(self, sString)
sakset/getyourdata
getyourdata/organization/tests.py
Python
mit
28,360
0.000388
from django.test import TestCase from django.contrib.auth.models import Permission, User from django.core.urlresolvers import reverse from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from getyourdata.test import isDjangoTest, isSeleniumTest from getyourdata.testcase import LiveServerTestCase from organization.models import Organization, OrganizationDraft, Comment, AuthenticationField def find_element_by_xpath(self, xpath, click_it=False, wait_time=10): """ Finds element using XPATH """ element = WebDriverWait(self.selenium, wait_time).until( EC.presence_of_element_located(( By.XPATH, xpath)) ) if click_it: element.click() return element def create_simple_organization(): """ Creates simple organization with default information """ return Organization.objects.create( name="Some organization", email_address="valid@address.com", verified=True) def create_comment(user_organization, user_rating=1, user_message="some message"): """ Creates either default comment or custom comment """ return Comment.objects.create( organization=user_organization,
message=user_message, rating=user_rating
        )


@isDjangoTest()
class OrganizationCreationTests(TestCase):

    def setUp(self):
        self.auth_field1 = AuthenticationField.objects.create(
            name="some_number", title='Some number')

    def test_organization_with_valid_email_address_can_be_added(self):
        response = self.client.post(
            reverse("organization:new_organization"),
            {"name": "The Organization",
             "email_address": "valid@address.com",
             "authentication_fields": (self.auth_field1.id,),
             "g-recaptcha-response": "PASSED"},
            follow=True)
        self.assertContains(response, "Organization profile created")
        organization = Organization.objects.all()[0]
        self.assertEquals(organization.name, "The Organization")
        self.assertEquals(organization.email_address, "valid@address.com")

    def test_organization_with_invalid_email_address_cant_be_added(self):
        response = self.client.post(
            reverse("organization:new_organization"),
            {"name": "The Organization",
             "email_address": "notavalidaddrss",
             "authentication_fields": (self.auth_field1.id,)},
            follow=True)
        self.assertNotContains(response, "Organization profile created")
        self.assertEquals(Organization.objects.all().count(), 0)

    def test_organization_with_missing_contact_information_cant_be_added(self):
        response = self.client.post(
            reverse("organization:new_organization"),
            {"name": "The Organization",
             "authentication_fields": (self.auth_field1.id,)},
            follow=True)
        self.assertContains(
            response, "Organization profile must contain either")
        self.assertEquals(Organization.objects.all().count(), 0)

    def test_organization_with_postal_information_can_be_added(self):
        response = self.client.post(
            reverse("organization:new_organization"),
            {"name": "The Organization",
             "address_line_one": "Fake Street 4",
             "postal_code": "00444",
             "country": "Finland",
             "authentication_fields": (self.auth_field1.id,),
             "g-recaptcha-response": "PASSED"},
            follow=True)
        self.assertContains(response, "Organization profile created")
        organization = Organization.objects.all()[0]
        self.assertEquals(organization.name, "The Organization")
        self.assertEquals(organization.address_line_one, "Fake Street 4")
        self.assertEquals(organization.postal_code, "00444")
        self.assertEquals(organization.country, "Finland")

    def test_organization_with_missing_postal_information_cant_be_added(self):
        response = self.client.post(
            reverse("organization:new_organization"),
            {"name": "The Organization",
             "address_line_one": "Fake Street 4",
             "authentication_fields": (self.auth_field1.id,)},
            follow=True)
        self.assertNotContains(response, "Organization profile created")
        self.assertEquals(Organization.objects.all().count(), 0)

    def test_organization_with_valid_postal_and_email_can_be_added(self):
        response = self.client.post(
            reverse("organization:new_organization"),
            {"name": "The Organization",
             "address_line_one": "Fake Street 4",
             "postal_code": "00444",
             "country": "Finland",
             "email_address": "fake@address.com",
             "authentication_fields": (self.auth_field1.id,),
             "g-recaptcha-response": "PASSED"},
            follow=True)
        self.assertContains(response, "Organization profile created")
        organization = Organization.objects.all()[0]
        self.assertEquals(organization.name, "The Organization")
        self.assertEquals(organization.address_line_one, "Fake Street 4")
        self.assertEquals(organization.postal_code, "00444")
        self.assertEquals(organization.country, "Finland")
        self.assertEquals(organization.email_address, "fake@address.com")

    def test_organization_with_no_authentication_fields_cant_be_added(self):
        response = self.client.post(
            reverse("organization:new_organization"),
            {"name": "The Organization",
             "address_line_one": "Fake Street 4",
             "postal_code": "00444",
             "country": "Finland",
             "email_address": "fake@address.com"},
            follow=True)
        self.assertNotContains(response, "Organization profile created")
        self.assertEquals(Organization.objects.all().count(), 0)


def get_amount_of_organizations_on_page(self, page=-1):
    """ Get organizations that are in organization list on current page """
    if page != -1:
        response = self.client.get(
            reverse("organization:list_organizations"), {"page": page})
    else:
        response = self.client.get(reverse("organization:list_organizations"))
    return response.content.count("Some organization")


@isDjangoTest()
class OrganizationListingTests(TestCase):

    def test_no_organizations_listed_when_no_organizations_exists(self):
        response = self.client.get(reverse("organization:list_organizations"))
        self.assertContains(response, "No organizations yet")

    def test_existing_organizations_listed_on_page(self):
        for i in range(0, 5):
            create_simple_organization()
        self.assertEquals(get_amount_of_organizations_on_page(self), 5)

    def test_only_15_organizations_are_listed_per_page(self):
        for i in range(0, 20):
            create_simple_organization()
        self.assertEquals(get_amount_of_organizations_on_page(self), 15)

    def test_correct_amount_of_organizations_listed_per_page(self):
        for i in range(0, 25):
            create_simple_organization()
        self.assertEquals(get_amount_of_organizations_on_page(self), 15)
        self.assertEquals(get_amount_of_organizations_on_page(self, 2), 10)


@isDjangoTest()
class OrganizationViewTests(TestCase):

    def test_organization_postal_contact_details_displayed_if_available(self):
        organization = Organization.objects.create(
            name="The Organization", address_line_one="Fake Street 4",
            postal_code="00234", country="Finland", verified=True)
        self.auth_field1 = AuthenticationField.objects.create(
            name="some_number", title='Some number')
        response = self.client.get(
            reverse("organization:view_organization", args=(organization.id,)))
        self.assertContains(response, "Fake Street 4")
        self.assertContains(response, "00234")
        se
airpoint/script.gb.wiz
downloader.py
Python
gpl-2.0
618
0.02589
import xbmcgui
import urllib


def download(url, dest, dp=None):
    if not dp:
        dp = xbmcgui.DialogProgress()
        dp.create("Elmore...Maintenance", "Downloading & Copying File", ' ', ' ')
    dp.update(0)
    urllib.urlretrieve(url, dest,
                       lambda nb, bs, fs, url=url: _pbhook(nb, bs, fs, url, dp))


def _pbhook(numblocks, blocksize, filesize, url, dp):
    try:
        percent = min((numblocks * blocksize * 100) / filesize, 100)
        dp.update(percent)
    except:
        percent = 100
        dp.update(percent)
    if dp.iscanceled():
        raise Exception("Canceled")
        dp.close()
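A minimal usage sketch, assuming this runs inside Kodi (where xbmcgui is available); the URL and destination path are placeholders:

# Hypothetical invocation:
download('http://example.com/archive.zip', '/tmp/archive.zip')
# When dp is None, download() creates its own DialogProgress, updates it
# from _pbhook() as blocks arrive, and raises Exception("Canceled") if the
# user cancels the dialog.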
coll-gate/collgate
server/accession/fixtures/sequences.py
Python
mit
661
0.00607
# -*- coding: utf-8; -*-
#
# @file sequences
# @brief collgate
# @author Frédéric SCHERMA (INRA UMR1095)
# @date 2018-01-09
# @copyright Copyright (c) 2018 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details


def fixture(fixture_manager, factory_manager):
    acc_seq = "CREATE SEQUENCE IF NOT EXISTS accession_naming_seq START WITH 1 INCREMENT BY 1 NO MINVALUE NO MAXVALUE CACHE 1;"
    bat_seq = "CREATE SEQUENCE IF NOT EXISTS batch_naming_seq START WITH 1 INCREMENT BY 1 NO MINVALUE NO MAXVALUE CACHE 1;"

    from django.db import connection
    with connection.cursor() as cursor:
        cursor.execute(acc_seq)
        cursor.execute(bat_seq)
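CREATE SEQUENCE ... IF NOT EXISTS is PostgreSQL syntax, so this fixture assumes a PostgreSQL backend. A sketch of how calling code could then draw the next accession number from the sequence (the variable name is illustrative):

from django.db import connection

with connection.cursor() as cursor:
    cursor.execute("SELECT nextval('accession_naming_seq')")
    next_accession_number = cursor.fetchone()[0]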
stollcri/Research-Matrices
pgm/svd-pgm-avg.py
Python
mit
8,300
0.032289
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys
import argparse
import numpy
import math


class Image:
    def __init__(self, matrix=[[]], width=0, height=0, depth=0):
        self.matrix = matrix
        self.width = width
        self.height = height
        self.depth = depth

    def set_width_and_height(self, width, height):
        self.width = width
        self.height = height
        self.matrix = [[0 for j in xrange(height)] for i in xrange(width)]


def multiply_matrices(matrixU, matrixS, matrixVt, kmin, kmax, depth, rescale=False, contrast=False):
    matrixScopy = matrixS.copy()
    # when kmax is not 0 use the provided kmax
    if kmax > 0:
        i = 0
        contrast_factor = (1.0 + (1 - (math.log(kmax, 2) / 10)))
        for t in numpy.nditer(matrixScopy, op_flags=["readwrite"]):
            if i < kmin or i >= kmax:
                t[...] = 0
            else:
                if contrast:
                    t[...] = t * contrast_factor #* math.pi / 2
            i += 1
    # when kmax is 0 then drop eigen values less than 1.0E-14
    else:
        for t in numpy.nditer(matrixScopy, op_flags=["readwrite"]):
            if round(t, 14) <= 0:
                t[...] = 0

    # recompose the trimmed SVD matrices back into matrix matrixComposed
    matrixComposed = numpy.dot(numpy.dot(matrixU, numpy.diag(matrixScopy)), matrixVt)

    # attempt the handle out of range values (TODO: pull out to own function)
    if rescale:
        curMin = 0
        curMax = 0
        # find min and max values
        for n in numpy.nditer(matrixComposed):
            if int(round(n)) < curMin:
                curMin = int(round(n))
            if int(round(n)) > curMax:
                curMax = int(round(n))
        # shift values up
        if curMax < depth and curMin < 0:
            shiftVal = depth - curMax
            for t in numpy.nditer(matrixComposed, op_flags=["readwrite"]):
                t[...] = int(round(t + shiftVal))
                if t > depth:
                    t[...] = depth
                elif t < 0:
                    t[...] = 0
        # shift values down
        elif curMax > depth and curMin > 0:
            shiftVal = curMin
            for t in numpy.nditer(matrixComposed, op_flags=["readwrite"]):
                t[...] = int(round(t - shiftVal))
                if t > depth:
                    t[...] = depth
                elif t < 0:
                    t[...] = 0
        # no chance to shift, just chop (TODO: perform some sort of scaling)
        else:
            for t in numpy.nditer(matrixComposed, op_flags=["readwrite"]):
                t[...] = int(round(t))
                if t > depth:
                    t[...] = depth
                elif t < 0:
                    t[...] = 0

    if contrast:
        depth_limit = depth # int(depth - (depth * .01))
        for t in numpy.nditer(matrixComposed, op_flags=["readwrite"]):
            if t < depth_limit:
                t[...] = 0

    return matrixComposed


def write_matrices_to_file(matrixU, matrixS, matrixVt, kmin, kmax, file_handle, width, height, depth, rescale=False, contrast=False):
    """ Write a decomposed matrix to file uncompressed as it would show compressed

    Keyword Arguments:
    matrixU -- the U portion of the SVD
    matrixS -- the S (sigma) portion of the SVD
    matrixVt -- the V transpose portion of the SVD
    kmin -- the minimum k value to use for compresion (ignored if kmax = 0)
    kmax -- the maximum kvalue to use for compresion (find optimal if zero)
    filename -- the file to write to (stdout if blank)
    width -- the image width
    height -- the image height
    depth -- the maximum grey scale value (normally 255)
    rescale -- True to shift resulting image into 0 < n < depth bounds
    """
    A = multiply_matrices(matrixU, matrixS, matrixVt, kmin, kmax, depth, rescale, contrast)
    pixelate_count = 4
    for x in xrange(1, pixelate_count):
        U, s, Vt = numpy.linalg.svd(A, full_matrices=True)
        A = multiply_matrices(U, s, Vt, kmin, kmax, depth, rescale, contrast)

    file_handle.write("P2\n")
    file_handle.write("# Generated by Stoll \n")
    file_handle.write(str(width))
    file_handle.write(" ")
    file_handle.write(str(height))
    file_handle.write("\n")
    file_handle.write(str(depth))
    file_handle.write("\n")
    for n in numpy.nditer(A):
        file_handle.write(str(int(round(n))))
        file_handle.write(" ")
    file_handle.write("\n")


def read_matrix_from_file(file_handle):
    """ Read an ASCII PGM file and create an Image object from it """
    row = 0
    col = 0
    rownull = True
    image = Image()
    for line in file_handle:
        if line[0] == '#':
            pass
        elif line[0] == 'P' and line[1] == '2':
            pass
        elif image.width == 0 and image.height == 0:
            x = 0
            y = 0
            x, y = [int(n) for n in line.split()]
            image.set_width_and_height(x, y)
        elif image.depth == 0:
            image.depth = int(line)
        else:
            for value in line.split():
                if col >= image.width:
                    row += 1
                    col = 0
                    # rows which are all black become all white
                    if rownull:
                        for x in xrange(0, image.width):
                            image.matrix[row][x] = image.depth
                    rownull = True
                image.matrix[row][col] = value
                if int(value) != 0:
                    rownull = False
                col += 1
    # columns which are all black become all white
    for x in xrange(0, image.width):
        colnull = True
        for y in xrange(0, image.height):
            if int(image.matrix[y][x]) != 0:
                colnull = False
        if colnull:
            for y in xrange(0, image.height):
                image.matrix[y][x] = image.depth
    return image


def process_svd(source_file_a, source_file_b, destination_file, kmin, kmax, rescale, contrast):
    """
    Read from file provided on the command line or from stdin
    then save uncompressed representations of the SVD compressed version
    """
    """
    imagea = read_matrix_from_file(source_file_a)
    Ma = numpy.asmatrix(imagea.matrix)
    U, s, Vt = numpy.linalg.svd(Ma, full_matrices=True)
    """
    pixelate_count = 2 + int(kmax / 2)

    imagea = read_matrix_from_file(source_file_a)
    Ma = numpy.asmatrix(imagea.matrix)
    # for x in xrange(1, pixelate_count):
    #     Ua, sa, Vta = numpy.linalg.svd(Ma, full_matrices=True)
    #     Ma = multiply_matrices(Ua, sa, Vta, kmin, kmax, imagea.depth, rescale, contrast)
    Ua, sa, Vta = numpy.linalg.svd(Ma, full_matrices=True)

    imageb = read_matrix_from_file(source_file_b)
    Mb = numpy.asmatrix(imageb.matrix)
    for x in xrange(1, pixelate_count):
        Ub, sb, Vtb = numpy.linalg.svd(Mb, full_matrices=True)
        Mb = multiply_matrices(Ub, sb, Vtb, kmin, kmax, imageb.depth, rescale, contrast)

    U = Ua
    for (x, y), value in numpy.ndenumerate(Ua):
        inta = Ua[x, y]
        intb = Ub[x, y]
        #intc = ((inta * 1.618) + (intb * 0.3)) / 1.9
        #intc = (inta + intb) / 2.0
        #intc = ((inta * 2) + intb) / 3.0
        #intc = ((inta * 3) + intb) / 4.0
        #intc = ((inta * 4) + intb) / 5.0
        intc = ((inta * 5) + intb) / 6.0
        #intc = ((inta * 6) + intb) / 7.0
        #intc = ((inta * 7) + intb) / 8.0
        U[x, y] = intc

    s = sa
    for (x,), value in numpy.ndenumerate(sa):
        inta = sa[x]
        intb = sb[x]
        #intc = ((inta * 1.618) + (intb * 0.3)) / 1.9
        #intc = (inta + intb) / 2.0
        #intc = ((inta * 2) + intb) / 3.0
        #intc = ((inta * 3) + intb) / 4.0
        #intc = ((inta * 4) + intb) / 5.0
        intc = ((inta * 5) + intb) / 6.0
        #intc = ((inta * 6) + intb) / 7.0
        #intc = ((inta * 7) + intb) / 8.0
        s[x] = intc

    Vt = Vta
    for (x, y), value in numpy.ndenumerate(Vta):
        inta = Vta[x, y]
        intb = Vtb[x, y]
        #intc = ((inta * 1.618) + (intb * 0.3)) / 1.9
        #intc = (inta + intb) / 2.0
        #intc = ((inta * 2) + intb) / 3.0
        #intc = ((inta * 3) + intb) / 4.0
        #intc = ((inta * 4) + intb) / 5.0
        intc = ((inta * 5) + intb) / 6.0
        #intc = ((inta * 6) + intb) / 7.0
        #intc = ((inta * 7) + intb) / 8.0
        Vt[x, y] = intc

    write_matrices_to_file(U, s, Vt, kmin, kmax, destination_file, imagea.width, imagea.height, imagea.depth, rescale, contrast)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("infile1", nargs='?', help="The source ASCII PGM file", type=argparse.FileType('r'), default=sys.stdin)
    parser.add_argument("infile2", nargs='?', help="The source ASCII PGM file", type=argparse.FileType('r'), default=sys.stdin)
    parser.add_argument("outfile", nargs='?', help="The destination ASCII PGM file", type=argparse.FileType('w'), default=sys.stdout)
    parser.add_argument("-j", "--kmin", help="The number of high k values to exlude", type=int, default=0)
    parser.add_argument("-k", "--kmax", help="The number k values to use", type=int, default=0)
    parser.add_argument("-s", "--scale", help="Fit resulting image depth into '0 < n < depth' bounds", action="store_true")
    parser.add_argument("-c", "--contrast", help="Improve high contrast images", action="store_true")
    args = parser.parse_args()
    try:
        process_svd(args.inf
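The script's core move (zero out part of the singular-value spectrum, then recompose U.diag(s).Vt) can be checked in isolation with numpy; the 4x4 matrix here is arbitrary rank-2 test data, not from the script:

import numpy

A = numpy.arange(16.0).reshape(4, 4)    # rank-2 test matrix
U, s, Vt = numpy.linalg.svd(A, full_matrices=True)
s[2:] = 0.0                             # keep the two largest singular values (kmax = 2)
A_approx = numpy.dot(numpy.dot(U, numpy.diag(s)), Vt)
print(numpy.allclose(A, A_approx))      # True: a rank-2 input survives k = 2 truncation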
anushbmx/kitsune
kitsune/access/templatetags/jinja_helpers.py
Python
bsd-3-clause
786
0
import jinja2
from django_jinja import library

from kitsune.access import utils as access


@jinja2.contextfunction
@library.global_function
def has_perm(context, perm, obj):
    """
    Check if the user has a permission on a specific object.

    Returns boolean.
    """
    return access.has_perm(context['request'].user, perm, obj)


@jinja2.contextfunction
@library.global_function
def has_perm_or_owns(context, perm, obj, perm_obj, field_name='creator'):
    """
    Check if the user has a permission or owns the object.

    Ownership is determined by comparing perm_obj.field_name to the user
    in context.
    """
    user = context['request'].user
    if user.is_anonymous():
        return False
    return access.has_perm_or_owns(user, perm, obj, perm_obj, field_name)
hhstore/flask-annotated
sanic/sanic-0.1.9/sanic/__init__.py
Python
mit
116
0
from .sanic import Sanic
from .blueprints import Blueprint

__version__ = '0.1.9'

__all__ = ['Sanic', 'Blueprint']
Phononic/Sean_Lubner_250HWs
hw10/hw10.py
Python
mit
8,297
0.013619
import os
import sqlite3

from flask import Flask, render_template, request, url_for, redirect, send_from_directory
from flask.ext.sqlalchemy import SQLAlchemy
from werkzeug import secure_filename
from pybtex.database.input import bibtex
from string import punctuation, whitespace

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/bib.db'
UPLOAD_FOLDER = './uploads'
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'bib', 'aux'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.debug = True


def clear_uploads():
    """ Delete all files in uploads folder """
    for the_file in os.listdir(app.config['UPLOAD_FOLDER']):
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], the_file)
        if os.path.isfile(file_path):
            os.remove(file_path)


#--------------------- Home Page ---------------------

@app.route("/")  # Main page
@app.route("/index.html")
@app.route("/home")
def home():
    """ Home page """
    # Specify links:
    links_list = [(url_for("upload_file"), 'Insert a Collection'),
                  (url_for("query_database"), 'Run a Query')]
    collections_list = collections.keys()
    collections_list.sort()
    return render_template('base.html',
                           window_title='BibTex Viewer | Main Page',
                           page_title='Home Page',
                           links=links_list,
                           db_present=(len(collections) > 0),
                           content='These are your available collections:',
                           html_collections=collections_list)


#--------------------- File Uploading & Parsing ---------------------

duplicates = [False, False]  # [collection_name_taken, file_name_taken]
collections = {}  # initialize a list of collections, each item of format: (name, file)
db = SQLAlchemy(app)  # initialize a database
eng = db.create_engine(app.config['SQLALCHEMY_DATABASE_URI'], convert_unicode=True)  # for running queries
info4db = []  # initialize a bucket for the parsed data to be inserted into the database


class Article(db.Model):
    """ Article database object, for inserting data """
    __tablename__ = 'article'
    #@@@@@@ TAG @@@@@@
    id = db.Column(db.Integer, primary_key=True)
    citation_tag = db.Column(db.String(100), unique=True)
    author_list = db.Column(db.String(500), unique=True)
    journal = db.Column(db.String(100), unique=True)
    volume = db.Column(db.Integer, unique=True)
    pages = db.Column(db.String(50), unique=True)
    year = db.Column(db.Integer, unique=True)
    title = db.Column(db.String(200), unique=True)
    collection = db.Column(db.String(50), unique=True)

    def __init__(self, citation_tag, author_list, journal, volume, pages, year, title, collection):
        self.citation_tag = citation_tag
        self.author_list = author_list
        self.journal = journal
        self.volume = volume
        self.pages = pages
        self.year = year
        self.title = title
        self.collection = collection

db.create_all()  # create database objects


def add_entries(bib_data, database="/tmp/bib.db"):
    """ Adds the entries in "bib_data" to the database located at path "database" """
    for item in bib_data:
        db.session.add(Article(item[0], item[1], item[2], item[3], item[4], item[5], item[6], item[7]))
        try:
            db.session.commit()
        except:
            pass


def allowed_file(filename):
    """ Sanitizes filename entry """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS


def is_bibtex(filename):
    """ returns True if the file is a bibtex file """
    return filename.rsplit('.', 1)[1].lower() == 'bib'


def parse_bibtex(bib_file_path_local, col_name):
    """ Parses a .bib file """
    parser = bibtex.Parser()
    bib_data = parser.parse_file(bib_file_path_local)

    def author2str(author):
        """ formats a Pybtex Person object into a string represntation """
        return author.last()[0].strip('{}') + ", " + " ".join(author.bibtex_first())

    gunk = punctuation + whitespace
    for tag, entry in bib_data.entries.items():
        try:  # authors
            author_list = str(" and ".join([author2str(x) for x in entry.persons['author']]))
        except:
            author_list = "Not Available"
        try:  # journal
            journal = str(entry.fields['journal'].strip(gunk))
        except:
            journal = "Not Available"
        try:  # volume
            vol = str(entry.fields['volume'].strip(gunk))
        except:
            vol = "Not Available"
        try:  # pages
            pages = str(entry.fields['pages'].strip(gunk))
        except:
            pages = "Not Available"
        try:  # year
            year = str(entry.fields['year'].strip(gunk))
        except:
            year = "Not Available"
        try:  # title
            title = str(entry.fields['title'].strip(gunk))
        except:
            title = "Not Available"
        info4db.append((str(tag), author_list, journal, vol, pages, year, title, col_name))
    add_entries(info4db)


def process_file(filename, col_name, the_file):
    """ Verify entries, parse the file, and create / add to a database from it """
    if len(col_name.strip(' ')) == 0:  # if no name provided for collection, use file name
        col_name = ''.join(secure_filename(the_file.filename).split('.')[:-1])
    if col_name in collections.keys():  # if collection name already taken
        duplicates[0] = True
        duplicates[1] = False
        return redirect(url_for('upload_file'))
    if filename in os.listdir(app.config['UPLOAD_FOLDER']):  # if filename name already taken
        duplicates[1] = True
        duplicates[0] = False
        return redirect(url_for('upload_file'))
    duplicates[0] = False
    duplicates[1] = False
    collections[col_name] = the_file  # add to dictionary of databases
    the_file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
    # Parse the file if bibtex
    if is_bibtex(the_file.filename):
        parse_bibtex(os.path.join(app.config['UPLOAD_FOLDER'], filename), col_name)
    return redirect(url_for('home'))


@app.route('/insert_collection', methods=['GET', 'POST'])
def upload_file():
    """ Function for inserting a collection """
    links_list = [(url_for("home"), 'Back to Home Page')]
    if request.method == 'POST':
        bib_file = request.files['col_file']
        bib_name = request.form['col_name']
        if bib_file and allowed_file(bib_file.filename):
            filename = secure_filename(bib_file.filename)
            return process_file(filename, bib_name, bib_file)
    else:
        return render_template('upload_file.html', links=links_list,
                               db_present=(len(collections) > 0), dupes=duplicates)


#--------------------- Querying---------------------

def run_query(query, database="/tmp/bib.db"):
    connection = eng.connect()
    sql_cmd = "SELECT * FROM article WHERE " + query
    results = connection.execute(sql_cmd)
    # NOTE: 'db_info' is never defined and a SQLAlchemy result has no
    # append(), so this loop would raise NameError if reached; the function
    # is currently unused -- query_database() renders info4db directly.
    for entry in db_info:
        results.append(entry)
    return results


@app.route("/query", methods=['GET', 'POST'])
def query_database():
    """ Query the databse html page manager """
    links_list = [(url_for("home"), 'Back to Home Page')]
    if request.method == 'POST':
        query_raw = request.form['query_str']
        query_results = info4db  # run_query(query_raw)
        #@@@@@@ TAG @@@@@@
        return render_template('query.html', links=links_list,
                               db_present=(len(collections) > 0),
                               query_preset=query_raw, query_str=query_raw,
                               results=info4db)
    else:
        return render_template('query.html', links=links_list,
                               db_present=(len(collections) > 0))


#--------------------- Main Program ---------------------

if __name__ == "__main__":
    clear_uploads()  # Empty out the uploads folder from the previous session
    if
bllli/tsxyAssistant
app/templates/translate.py
Python
gpl-3.0
1,644
0.000717
# coding=utf-8
"""html to jinja2 chief hole-digger

Rewrites the URLs of the *static files* referenced by the html files in this
directory and its subdirectories so that they are rendered with jinja2 +
flask url_for:

href="css/base.css" -> href="{{ url_for("static", filename="css/base.css") }}"

Before rewriting, a backup copy with a .bak suffix is created next to each file.

Usage:
    $ cd my_project
    $ python2 translate.py
"""
from __future__ import print_function

import re
import os
import shutil

types = ['css', 'html']

# href="css/base.css"
# src="img/a39-1.png"


def bak(filename):
    """Back up first -- data is priceless, operate with caution.

    :type filename: str  the file name
    """
    if os.path.exists(filename + ".bak"):
        return  # if a backup already exists, do not create another one
    if os.path.isfile(filename):
        shutil.copy(filename, filename + ".bak")


def rollback():
    """Roll back. Not needed for now, so left unimplemented."""
    pass


def translate(filename):
    with open(filename, 'r+') as f:
        replaced = re.sub(r'(href|src)="(css|img|font|js)/(.*?)"',
                          r'\g<1>="{{ url_for("static", filename="\g<2>/\g<3>") }}"',
                          f.read())
        f.seek(0)
        f.write(replaced)


if __name__ == '__main__':
    for paths, subs, files in os.walk(os.getcwd()):  # walk the files under this path
        for filename in files:
            if filename.split('.')[-1] not in types:
                # skip files whose extension is not in the translatable list
                continue
            fullname = os.path.join(paths, filename)
            print("translating " + fullname)
            bak(fullname)
            translate(fullname)
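The substitution can be exercised on a single string to see the rewrite the docstring describes:

import re

sample = '<link href="css/base.css"><img src="img/a39-1.png">'
print(re.sub(r'(href|src)="(css|img|font|js)/(.*?)"',
             r'\g<1>="{{ url_for("static", filename="\g<2>/\g<3>") }}"',
             sample))
# href becomes {{ url_for("static", filename="css/base.css") }} and
# src becomes {{ url_for("static", filename="img/a39-1.png") }}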
shaunstanislaus/sshuttle
src/firewall.py
Python
lgpl-2.1
29,513
0.002101
import errno
import socket
import select
import signal
import struct
import compat.ssubprocess as ssubprocess
import ssyslog
import sys
import os
import re
from helpers import log, debug1, debug3, islocal, Fatal, family_to_string, \
    resolvconf_nameservers
from fcntl import ioctl
from ctypes import c_char, c_uint8, c_uint16, c_uint32, Union, Structure, \
    sizeof, addressof, memmove

# python doesn't have a definition for this
IPPROTO_DIVERT = 254


def nonfatal(func, *args):
    try:
        func(*args)
    except Fatal, e:
        log('error: %s\n' % e)


def ipt_chain_exists(family, table, name):
    if family == socket.AF_INET6:
        cmd = 'ip6tables'
    elif family == socket.AF_INET:
        cmd = 'iptables'
    else:
        raise Exception('Unsupported family "%s"' % family_to_string(family))
    argv = [cmd, '-t', table, '-nL']
    p = ssubprocess.Popen(argv, stdout=ssubprocess.PIPE)
    for line in p.stdout:
        if line.startswith('Chain %s ' % name):
            return True
    rv = p.wait()
    if rv:
        raise Fatal('%r returned %d' % (argv, rv))


def _ipt(family, table, *args):
    if family == socket.AF_INET6:
        argv = ['ip6tables', '-t', table] + list(args)
    elif family == socket.AF_INET:
        argv = ['iptables', '-t', table] + list(args)
    else:
        raise Exception('Unsupported family "%s"' % family_to_string(family))
    debug1('>> %s\n' % ' '.join(argv))
    rv = ssubprocess.call(argv)
    if rv:
        raise Fatal('%r returned %d' % (argv, rv))


_no_ttl_module = False


def _ipt_ttl(family, *args):
    global _no_ttl_module
    if not _no_ttl_module:
        # we avoid infinite loops by generating server-side connections
        # with ttl 42.  This makes the client side not recapture those
        # connections, in case client == server.
        try:
            argsplus = list(args) + ['-m', 'ttl', '!', '--ttl', '42']
            _ipt(family, *argsplus)
        except Fatal:
            _ipt(family, *args)
            # we only get here if the non-ttl attempt succeeds
            log('sshuttle: warning: your iptables is missing '
                'the ttl module.\n')
            _no_ttl_module = True
    else:
        _ipt(family, *args)


# We name the chain based on the transproxy port number so that it's possible
# to run multiple copies of sshuttle at the same time.  Of course, the
# multiple copies shouldn't have overlapping subnets, or only the most-
# recently-started one will win (because we use "-I OUTPUT 1" instead of
# "-A OUTPUT").
def do_iptables_nat(port, dnsport, family, subnets, udp):
    # only ipv4 supported with NAT
    if family != socket.AF_INET:
        raise Exception(
            'Address family "%s" unsupported by nat method'
            % family_to_string(family))
    if udp:
        raise Exception("UDP not supported by nat method")

    table = "nat"

    def ipt(*args):
        return _ipt(family, table, *args)

    def ipt_ttl(*args):
        return _ipt_ttl(family, table, *args)

    chain = 'sshuttle-%s' % port

    # basic cleanup/setup of chains
    if ipt_chain_exists(family, table, chain):
        nonfatal(ipt, '-D', 'OUTPUT', '-j', chain)
        nonfatal(ipt, '-D', 'PREROUTING', '-j', chain)
        nonfatal(ipt, '-F', chain)
        ipt('-X', chain)

    if subnets or dnsport:
        ipt('-N', chain)
        ipt('-F', chain)
        ipt('-I', 'OUTPUT', '1', '-j', chain)
        ipt('-I', 'PREROUTING', '1', '-j', chain)

    if subnets:
        # create new subnet entries.  Note that we're sorting in a very
        # particular order: we need to go from most-specific (largest swidth)
        # to least-specific, and at any given level of specificity, we want
        # excludes to come first.  That's why the columns are in such a non-
        # intuitive order.
        for f, swidth, sexclude, snet \
                in sorted(subnets, key=lambda s: s[1], reverse=True):
            if sexclude:
                ipt('-A', chain, '-j', 'RETURN',
                    '--dest', '%s/%s' % (snet, swidth),
                    '-p', 'tcp')
            else:
                ipt_ttl('-A', chain, '-j', 'REDIRECT',
                        '--dest', '%s/%s' % (snet, swidth),
                        '-p', 'tcp',
                        '--to-ports', str(port))

    if dnsport:
        nslist = resolvconf_nameservers()
        for f, ip in filter(lambda i: i[0] == family, nslist):
            ipt_ttl('-A', chain, '-j', 'REDIRECT',
                    '--dest', '%s/32' % ip,
                    '-p', 'udp',
                    '--dport', '53',
                    '--to-ports', str(dnsport))


def do_iptables_tproxy(port, dnsport, family, subnets, udp):
    if family not in [socket.AF_INET, socket.AF_INET6]:
        raise Exception(
            'Address family "%s" unsupported by tproxy method'
            % family_to_string(family))

    table = "mangle"

    def ipt(*args):
        return _ipt(family, table, *args)

    def ipt_ttl(*args):
        return _ipt_ttl(family, table, *args)

    mark_chain = 'sshuttle-m-%s' % port
    tproxy_chain = 'sshuttle-t-%s' % port
    divert_chain = 'sshuttle-d-%s' % port

    # basic cleanup/setup of chains
    if ipt_chain_exists(family, table, mark_chain):
        ipt('-D', 'OUTPUT', '-j', mark_chain)
        ipt('-F', mark_chain)
        ipt('-X', mark_chain)

    if ipt_chain_exists(family, table, tproxy_chain):
        ipt('-D', 'PREROUTING', '-j', tproxy_chain)
        ipt('-F', tproxy_chain)
        ipt('-X', tproxy_chain)

    if ipt_chain_exists(family, table, divert_chain):
        ipt('-F', divert_chain)
        ipt('-X', divert_chain)

    if subnets or dnsport:
        ipt('-N', mark_chain)
        ipt('-F', mark_chain)
        ipt('-N', divert_chain)
        ipt('-F', divert_chain)
        ipt('-N', tproxy_chain)
        ipt('-F', tproxy_chain)
        ipt('-I', 'OUTPUT', '1', '-j', mark_chain)
        ipt('-I', 'PREROUTING', '1', '-j', tproxy_chain)
        ipt('-A', divert_chain, '-j', 'MARK', '--set-mark', '1')
        ipt('-A', divert_chain, '-j', 'ACCEPT')
        ipt('-A', tproxy_chain, '-m', 'socket', '-j', divert_chain,
            '-m', 'tcp', '-p', 'tcp')

    if subnets and udp:
        ipt('-A', tproxy_chain, '-m', 'socket', '-j', divert_chain,
            '-m', 'udp', '-p', 'udp')

    if dnsport:
        nslist = resolvconf_nameservers()
        for f, ip in filter(lambda i: i[0] == family, nslist):
            ipt('-A', mark_chain, '-j', 'MARK', '--set-mark', '1',
                '--dest', '%s/32' % ip,
                '-m', 'udp', '-p', 'udp', '--dport', '53')
            ipt('-A', tproxy_chain, '-j', 'TPROXY', '--tproxy-mark', '0x1/0x1',
                '--dest', '%s/32' % ip,
                '-m', 'udp', '-p', 'udp', '--dport', '53',
                '--on-port', str(dnsport))

    if subnets:
        for f, swidth, sexclude, snet \
                in sorted(subnets, key=lambda s: s[1], reverse=True):
            if sexclude:
                ipt('-A', mark_chain, '-j', 'RETURN',
                    '--dest', '%s/%s' % (snet, swidth),
                    '-m', 'tcp', '-p', 'tcp')
                ipt('-A', tproxy_chain, '-j', 'RETURN',
                    '--dest', '%s/%s' % (snet, swidth),
                    '-m', 'tcp', '-p', 'tcp')
            else:
                ipt('-A', mark_chain, '-j', 'MARK', '--set-mark', '1',
                    '--dest', '%s/%s' % (snet, swidth),
                    '-m', 'tcp', '-p', 'tcp')
                ipt('-A', tproxy_chain, '-j', 'TPROXY',
                    '--tproxy-mark', '0x1/0x1',
                    '--dest', '%s/%s' % (snet, swidth),
                    '-m', 'tcp', '-p', 'tcp',
                    '--on-port', str(port))
            if sexclude and udp:
                ipt('-A', mark_chain, '-j', 'RETURN',
                    '--dest', '%s/%s' % (snet, swidth),
                    '-m', 'udp', '-p', 'udp')
                ipt('-A', tproxy_chain, '-j', 'RETURN',
                    '--dest', '%s/%s' % (snet, swidth),
                    '-m', 'udp', '-p', 'udp')
            elif udp:
                ipt('-A', mark_chain
ToonTownInfiniteRepo/ToontownInfinite
toontown/cogdominium/CogdoCraneGameSpec.py
Python
mit
1,302
0.004608
from toontown.coghq.SpecImports import *

GlobalEntities = {
    1000: {'type': 'levelMgr',
           'name': 'LevelMgr',
           'comment': '',
           'parentEntId': 0,
           'modelFilename': 'phase_10/models/cogHQ/EndVault.bam'},
    1001: {'type': 'editMgr',
           'name': 'EditMgr',
           'parentEntId': 0,
           'insertEntity': None,
           'removeEntity': None,
           'requestNewEntity': None,
           'requestSave': None},
    0: {'type': 'zone',
        'name': 'UberZone',
        'comment': '',
        'parentEntId': 0,
        'scale': 1,
        'description': '',
        'visibility': []},
    10001: {'type': 'cogdoCraneCogSettings',
            'name': '<unnamed>',
            'comment': '',
            'parentEntId': 0,
            'CogFlyAwayDuration': 4.0,
            'CogFlyAwayHeight': 50.0,
            'CogMachineInteractDuration': 2.0,
            'CogSpawnPeriod': 10.0,
            'CogWalkSpeed': 12.07161265369133},
    10000: {'type': 'cogdoCraneGameSettings',
            'name': '<unnamed>',
            'comment': '',
            'parentEntId': 0,
            'EmptyFrictionCoef': 0.1,
            'GameDuration': 180.0,
            'Gravity': -32,
            'MagnetMass': 1.0,
            'MoneyBagGrabHeight': -4.1,
            'RopeLinkMass': 1.0}}

Scenario0 = {}

levelSpec = {'globalEntities': GlobalEntities,
             'scenarios': [Scenario0]}
cheral/orange3-text
orangecontrib/text/tests/test_newspaper.py
Python
bsd-2-clause
1,311
0.009916
import unittest
import os
import csv

from orangecontrib.text.scraper import _get_info
from unittest.mock import patch
from contextlib import contextmanager
import tempfile


class MockUrlOpen:
    def __init__(self, filepath):
        self.data = []
        with open(filepath, 'r') as f:
            reader = csv.reader(f, delimiter='\t')
            for row in reader:
                self.data = row
                self.url = row[4]
            try:
                next(f)
            except StopIteration:  # Last empty line is sometimes missing
                pass

    def __call__(self, url):
        @contextmanager
        def cm():
            yield self
        return cm()


filename = 'article_cache.csv'
filepath = os.path.join(os.path.dirname(__file__), filename)
mock_urllib = MockUrlOpen(filepath)


@patch('urllib.request.urlopen', mock_urllib)
class NewspaperTests(unittest.TestCase):
    def setUp(self):
        self.tmp = tempfile.NamedTemporaryFile(delete=False)
        os.remove(self.tmp.name)
        self.cache_path = self.tmp.name

    def test_get_info(self):
        # checks whether article, title, date, author, url are same
        scraped_data, is_cached = _get_info(mock_urllib.url)
        self.assertEqual(scraped_data, mock_urllib.data)
        self.assertTrue(is_cached)
glumu/django-redis-cluster
django_redis_cluster/__init__.py
Python
bsd-3-clause
425
0.011765
#coding:utf8
from django.core.cache import caches


def get_cache(alias):
    return caches[alias]


def get_redis_connection(alias='default', write=True):
    """
    Helper used for obtain a raw redis client.
    """
    cache = get_cache(alias)
    if not hasattr(cache.client, 'get_client'):
        raise NotImplementedError("This backend does not supports this feature")
    return cache.client.get_client(write)
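A short usage sketch, assuming django-redis-cluster is configured as the 'default' cache backend; the key is a placeholder, and the returned object is expected to speak the plain redis-py API:

conn = get_redis_connection('default')  # raw client from the configured backend
conn.set('visits', 0)
conn.incr('visits')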
werebus/dotfiles
bin/shorten_path.py
Python
mit
772
0.006477
#!/usr/bin/env python
import sys
import os
import re

try:
    path = sys.argv[1]
    length = int(sys.argv[2])
except:
    print >>sys.stderr, "Usage: $0 <path> <length>"
    sys.exit(1)

path = re.sub(os.getenv('HOME'), '~', path)
while len(path) > length:
    dirs = path.split("/")

    # Find the longest directory in the path.
    max_index = -1
    max_length = 3
    for i in range(len(dirs) - 1):
        if len(dirs[i]) > max_length:
            max_index = i
            max_length = len(dirs[i])

    # Shorten it by one character.
    if max_index >= 0:
        dirs[max_index] = dirs[max_index][:max_length-3] + ".."
        path = "/".join(dirs)
    # Didn't find anything to shorten. This is as good as it gets.
    else:
        break

print(path)
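Tracing the loop on a small input shows the behavior: each pass replaces the tail of the longest directory with "..", shrinking the path by one character (net) per pass until it fits. For example, assuming HOME=/home/user:

# $ shorten_path.py /home/user/aaaaaaaaaa/bb 10
# "~/aaaaaaaaaa/bb"  (15 chars) shrinks pass by pass to:
# "~/aaa../bb"       (10 chars)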
matt77hias/Clipping
src/surfacearea.py
Python
gpl-3.0
2,427
0.005356
import numpy as np

###############################################################################
## Surface Area
## ---------------------------------
## Planar polygon
###############################################################################

# Theorem of Green
#------------------------------------------------------------------------------
# integral_contour(L dx + M dy) = integral_area((dM/dx - dL/dy) dx dy)
# contour = oriented, piecewise smooth, simple closed curve in a plane
# area = region bounded by perimeter
# L, M = functions of (x,y) defined on an open region containing area with continuous partial derivatives
#
# Application:
# Planimeter
# integral_contour(-y dx + x dy) = integral_area((dx/dx - -dy/dy) dx dy) = 2 area


def area(p_vs, n=None):
    if (len(p_vs) < 3):
        return 0.0
    dim = p_vs[0].shape[0]
    if dim == 2:
        return _area2D(p_vs)
    elif dim == 3:
        return _area3D(p_vs, n=n)


def _area2D(p_vs):
    area = 0.0
    nb_p_vs = len(p_vs)

    #for j in range(nb_p_vs):
    #    p_v1 = p_vs[(j+nb_p_vs-1) % nb_p_vs]
    #    p_v2 = p_vs[j]
    #    area += + p_v1[0]*p_v2[1] - p_v2[0]*p_v1[1]

    for j in range(nb_p_vs):
        p_v1 = p_vs[(j+nb_p_vs-1) % nb_p_vs]
        p_v2 = p_vs[j]
        p_v3 = p_vs[(j+nb_p_vs+1) % nb_p_vs]
        area += p_v2[0] * (p_v3[1] - p_v1[1])

    return 0.5 * abs(area)


def _area3D(p_vs, n):
    area = 0.0
    nb_p_vs = len(p_vs)

    ax = abs(n[0])
    ay = abs(n[1])
    az = abs(n[2])

    if (ax > ay and ax > az):
        lca = 0
    elif (ay > az):
        lca = 1
    else:
        lca = 2

    an = np.sqrt(ax*ax + ay*ay + az*az)

    if lca == 0:
        for j in range(nb_p_vs):
            p_v1 = p_vs[(j+nb_p_vs-1) % nb_p_vs]
            p_v2 = p_vs[j]
            p_v3 = p_vs[(j+nb_p_vs+1) % nb_p_vs]
            area += p_v2[1] * (p_v3[2] - p_v1[2])
        area *= (an / n[0])
    elif lca == 1:
        for j in range(nb_p_vs):
            p_v1 = p_vs[(j+nb_p_vs-1) % nb_p_vs]
            p_v2 = p_vs[j]
            p_v3 = p_vs[(j+nb_p_vs+1) % nb_p_vs]
            area += p_v2[2] * (p_v3[0] - p_v1[0])
        area *= (an / n[1])
    else:
        for j in range(nb_p_vs):
            p_v1 = p_vs[(j+nb_p_vs-1) % nb_p_vs]
            p_v2 = p_vs[j]
            p_v3 = p_vs[(j+nb_p_vs+1) % nb_p_vs]
            area += p_v2[0] * (p_v3[1] - p_v1[1])
        area *= (an / n[2])

    return 0.5 * abs(area)
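area() reads p_vs[0].shape, so it expects numpy vectors; a quick sanity check on the unit square (illustrative, not part of the module):

import numpy as np

square = [np.array([0.0, 0.0]), np.array([1.0, 0.0]),
          np.array([1.0, 1.0]), np.array([0.0, 1.0])]
print(area(square))  # 1.0 -- half the absolute planimeter sum of 2.0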
mprinc/FeelTheSound
src/PoC/fft.py
Python
cc0-1.0
4,053
0.033062
#!/usr/bin/env python

# 8 band Audio equaliser from wav file

# import alsaaudio as aa
# import smbus
from struct import unpack
import numpy as np
import wave
from time import sleep
import sys

ADDR  = 0x20  # The I2C address of MCP23017
DIRA  = 0x00  # PortA I/O direction, by pin. 0=output, 1=input
DIRB  = 0x01  # PortB I/O direction, by pin. 0=output, 1=input
BANKA = 0x12  # Register address for Bank A
BANKB = 0x13  # Register address for Bank B

# bus=smbus.SMBus(0)  # Use '1' for newer Pi boards;
# # Set up the 23017 for 16 output pins
# bus.write_byte_data(ADDR, DIRA, 0);  # all zeros = all outputs on Bank A
# bus.write_byte_data(ADDR, DIRB, 0);  # all zeros = all outputs on Bank B

# def TurnOffLEDS():
#     bus.write_byte_data(ADDR, BANKA, 0xFF)  # set all columns high
#     bus.write_byte_data(ADDR, BANKB, 0x00)  # set all rows low

# def Set_Column(row, col):
#     bus.write_byte_data(ADDR, BANKA, col)
#     bus.write_byte_data(ADDR, BANKB, row)

# # Initialise matrix
# TurnOffLEDS()

matrix = [0, 0, 0, 0, 0, 0, 0, 0]
power = []
# weighting = [2,2,8,8,16,32,64,64]  # Change these according to taste
weighting = [2, 2, 2, 2, 4, 4, 8, 8]  # Change these according to taste

# Set up audio
#wavfile = wave.open('test_stereo_16000Hz_16bit_PCM.wav','r')
#wavfile = wave.open('Media-Convert_test5_PCM_Stereo_VBR_8SS_44100Hz.wav','r')
wavfile = wave.open('Media-Convert_test2_PCM_Mono_VBR_8SS_48000Hz.wav', 'r')
sample_rate = wavfile.getframerate()
no_channels = wavfile.getnchannels()
chunk = 4096  # Use a multiple of 8

# output = aa.PCM(aa.PCM_PLAYBACK, aa.PCM_NORMAL)
# output.setchannels(no_channels)
# output.setrate(sample_rate)
# output.setformat(aa.PCM_FORMAT_S16_LE)
# output.setperiodsize(chunk)


# Return power array index corresponding to a particular frequency
def piff(val):
    return int(2*chunk*val/sample_rate)


def print_intensity(matrix):
    levelFull = "||||||||"
    levelEmpty = "        "  # eight spaces (original spacing lost in extraction)
    levelStr = ""
    for level in matrix:
        #level = 0
        levelStr += levelFull[0:level] + levelEmpty[0:8-(level)] + " "
    sys.stdout.write("\rlevel: " + levelStr)
    sys.stdout.flush()


def calculate_levels(data, chunk, sample_rate):
    #print ("[calculate_levels] chunk=%s, sample_rate: %s, len(data)=%s" % (chunk, sample_rate, len(data)))
    if(len(data) != chunk):
        print ("\n[calculate_levels] skiping: chunk=%s != len(data)=%s" % (chunk, len(data)))
        return None
    global matrix
    # Convert raw data (ASCII string) to numpy array
    data = unpack("%dh" % (len(data)/2), data)
    data = np.array(data, dtype='h')
    # Apply FFT - real data
    fourier = np.fft.rfft(data)
    # Remove last element in array to make it the same size as chunk
    fourier = np.delete(fourier, len(fourier)-1)
    # Find average 'amplitude' for specific frequency ranges in Hz
    power = np.abs(fourier)
    matrix[0] = int(np.mean(power[piff(0)    :piff(156):1]))
    matrix[1] = int(np.mean(power[piff(156)  :piff(313):1]))
    matrix[2] = int(np.mean(power[piff(313)  :piff(625):1]))
    matrix[3] = int(np.mean(power[piff(625)  :piff(1250):1]))
    matrix[4] = int(np.mean(power[piff(1250) :piff(2500):1]))
    matrix[5] = int(np.mean(power[piff(2500) :piff(5000):1]))
    matrix[6] = int(np.mean(power[piff(5000) :piff(10000):1]))
    # Produces error, I guess to low sampling rate of the audio file
    # matrix[7]= int(np.mean(power[piff(10000):piff(20000):1]))

    # Tidy up column values for the LED matrix
    matrix = np.divide(np.multiply(matrix, weighting), 1000000)
    # Set floor at 0 and ceiling at 8 for LED matrix
    matrix = matrix.clip(0, 8)
    return matrix


# Process audio file
print "Processing....."
data = wavfile.readframes(chunk)
while data != '':
    # output.write(data)
    matrix = calculate_levels(data, chunk, sample_rate)
    # The original read "if matrix == None: next", a no-op that never skipped
    # anything; guarding the display call is the intended control flow.
    if matrix is not None:
        print_intensity(matrix)
        # for i in range (0,8):
        #     Set_Column((1<<matrix[i])-1,0xFF^(1<<i))
    sleep(0.1)
    data = wavfile.readframes(chunk)

# TurnOffLEDS()
# =========================
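piff() converts a frequency in Hz into an index into the rfft power array; for this file (sample_rate = 48000, chunk = 4096) the arithmetic works out as below (a quick check, not part of the original script):

# piff(156) = int(2 * 4096 * 156 / 48000) = 26
# piff(313) = int(2 * 4096 * 313 / 48000) = 53
# so matrix[1] averages power[26:53], i.e. roughly the 156-313 Hz band.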
ypflll/wiringX
python/examples/blink.py
Python
gpl-3.0
1,039
0.013474
#!/usr/bin/env python

# Copyright (c) 2014 CurlyMo <curlymoo1@gmail.com>

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import os
import sys
from time import sleep
from wiringX import gpio

gpio.setup();
gpio.pinMode(gpio.PIN0, gpio.OUTPUT);

print gpio.platform();
print gpio.I2CRead(0x10);

try:
    while True:
        gpio.digitalWrite(gpio.PIN0, gpio.LOW);
        sleep(1);
        gpio.digitalWrite(gpio.PIN0, gpio.HIGH);
        sleep(1);
except KeyboardInterrupt:
    pass
Fillll/reddit2telegram
reddit2telegram/channels/~inactive/r_haikuos/app.py
Python
mit
137
0.007299
#encoding:utf-8

subreddit = 'haikuOS'
t_channel = '@r_haikuos'


def send_post(submission, r2t):
    return r2t.send_simple(submission)
caisq/tensorflow
tensorflow/contrib/autograph/pyct/static_analysis/cfg.py
Python
apache-2.0
15,704
0.005349
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow graph analysis.

Given a Python AST we construct a control flow graph, with edges both to the
next and previous statements (so it can easily walk the graph both ways).
Its nodes contain the AST of the statements. It can then perform forward
or backward analysis on this CFG.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from collections import namedtuple
import functools
import operator

import gast

from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct.static_analysis import activity


class CfgNode(object):
  """A node in the CFG."""
  __slots__ = ['next', 'value', 'prev']

  def __init__(self, value):
    self.next = set()
    self.prev = set()
    self.value = value


class Cfg(namedtuple('Cfg', ['entry', 'exit'])):
  """A Control Flow Graph.

  Each statement is represented as a node. For control flow statements such
  as conditionals and loops the conditional itself is a node which either
  branches or cycles, respectively.

  Attributes:
    entry: The entry node, which contains the `gast.arguments` node of the
        function definition.
    exit: The exit node. This node is special because it has no value (i.e.
        no corresponding AST node). This is because Python functions can have
        multiple return statements.
  """
  pass


class CfgBuilder(gast.NodeVisitor):
  """Construct a control flow graph.

  Construct a CFG starting from a FunctionDef node.
  Usage:
    cfg_obj = CfgBuilder().build_cfg(fndef_node)
  """

  def __init__(self):
    # The current leaves of the CFG
    self.current_leaves = []
    # TODO(alexbw): generalize to break, return, continue, yield, etc.
    # A stack of lists, tracking continue statements
    self.continue_ = []
    # A stack of lists tracking break nodes
    self.break_ = []

  def set_current_leaves(self, cfg_node):
    """Link this cfg_node to the current leaves.

    This is the central function for building the CFG. It links the
    current head cfg_nodes to the passed cfg_node. It then resets the head
    to the passed cfg_node.

    Args:
      cfg_node: A CfgNode instance.
    """
    for head in self.current_leaves:
      head.next.add(cfg_node)
      # While we're linking the CFG forward, add backlinks
      cfg_node.prev.add(head)
    self.current_leaves = [cfg_node]

  def build_cfg(self, node):
    """Build a CFG for a function.

    Implementation of building a CFG for dataflow analysis. See, e.g.:
    https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf

    Args:
      node: A function definition the body of which to analyze.
    Returns:
      A CFG object.
    Raises:
      TypeError: If the input is not a function definition.
    """
    if not isinstance(node, gast.FunctionDef):
      raise TypeError('input must be a function definition')
    entry_cfg_node = CfgNode(node.args)
    self.current_leaves = [entry_cfg_node]
    self.visit_statements(node.body)
    exit_cfg_node = CfgNode(None)
    self.set_current_leaves(exit_cfg_node)
    return Cfg(entry_cfg_node, exit_cfg_node)

  def visit_statements(self, nodes):
    for node in nodes:
      # Check for control flow
      if isinstance(node, (gast.For, gast.While, gast.If, gast.Try, gast.Break,
                           gast.Continue, gast.With)):
        self.visit(node)
      else:
        expr = CfgNode(node)
        self.set_current_leaves(expr)

  def generic_visit(self, node):
    raise ValueError('unknown control flow')

  def visit_If(self, node):
    # TODO(alexbw): change this to use immutable tuples instead of lists
    # The current head will hold the conditional
    test = CfgNode(node.test)
    self.set_current_leaves(test)
    # Handle the body
    self.visit_statements(node.body)
    body_exit = self.current_leaves
    self.current_leaves = [test]
    # Handle the orelse
    self.visit_statements(node.orelse)
    self.current_leaves.extend(body_exit)

  def visit_While(self, node):
    test = CfgNode(node.test)
    self.set_current_leaves(test)
    # Start a new level of nesting
    self.break_.append([])
    self.continue_.append([])
    # Handle the body
    self.visit_statements(node.body)
    body_exit = self.current_leaves
    self.current_leaves.extend(self.continue_.pop())
    self.set_current_leaves(test)
    # Handle the orelse
    self.visit_statements(node.orelse)
    # The break statements and the test go to the next node
    self.current_leaves.extend(self.break_.pop())
    # Body and orelse statements can reach out of the loop
    self.current_leaves.extend(body_exit)

  def visit_For(self, node):
    iter_ = CfgNode(node.iter)
    self.set_current_leaves(iter_)
    self.break_.append([])
    self.continue_.append([])
    self.visit_statements(node.body)
    body_exit = self.current_leaves
    self.current_leaves.extend(self.continue_.pop())
    self.set_current_leaves(iter_)
    # Handle the orelse
    self.visit_statements(node.orelse)
    # The break statements and the test go to the next node
    self.current_leaves.extend(self.break_.pop())
    # Body and orelse statements can reach out of the loop
    self.current_leaves.extend(body_exit)

  def visit_Break(self, node):
    self.break_[-1].extend(self.current_leaves)
    self.current_leaves[:] = []

  def visit_Continue(self, node):
    self.continue_[-1].extend(self.current_leaves)
    self.current_leaves[:] = []

  def visit_Try(self, node):
    self.visit_statements(node.body)
    body = self.current_leaves
    handlers = []
    for handler in node.handlers:
      self.current_leaves = body[:]
      self.visit_statements(handler.body)
      handlers.extend(self.current_leaves)
    self.current_leaves = body
    self.visit_statements(node.orelse)
    self.current_leaves = handlers + self.current_leaves
    self.visit_statements(node.finalbody)

  def visit_With(self, node):
    for item in node.items:
      self.set_current_leaves(CfgNode(item))
    self.visit_statements(node.body)


# TODO(alexbw): once CFG analysis occurs at a block level,
# this extra class will not be necessary
class PropagateAnalysis(gast.NodeVisitor):
  """Port analysis annotations from statements to their enclosing blocks."""

  def __init__(self, analysis):
    self.transfer_fn = analysis.transfer_fn
    self.in_label = analysis.in_label
    self.out_label = analysis.out_label
    super(PropagateAnalysis, self).__init__()

  def visit_If(self, node):
    # Depth-first.
    self.generic_visit(node)
    incoming = anno.getanno(node.body[0], self.in_label)
    incoming |= anno.getanno(node.test, self.in_label)
    outgoing = anno.getanno(node.body[-1], self.out_label)
    outgoing |= anno.getanno(node.test, self.out_label)
    if node.orelse:
      orelse_outgoing = anno.getanno(node.orelse[-1], self.out_label)
      outgoing = self.transfer_fn(outgoing, orelse_outgoing)
    anno.setanno(node, self.in_label, incoming)
    anno.setanno(node, self.out_label, outgoing)

  def visit_For(self, node):
    self.generic_visit(node)
    incoming = set(anno.getanno(node.body[0], self.in_label))
    incoming -= set((anno.getanno(node.target, anno.Basic.QN),))
    outgoing = anno.getanno(node.body[-1], self.out_label)
    if node.orelse:
      orelse_outgoing = anno.getanno(node.orelse[-1], self.out_label)
      outgoing = self.transfer_fn(outgoing, orelse_outgoing)
    anno.setanno(node, self.in_label, frozenset(incoming))
    anno.setann
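Following the usage note in the CfgBuilder docstring, a CFG for a small function can be built from a gast tree; this sketch assumes gast's ast_to_gast converter and is not part of the module:

import ast

source = (
    "def f(x):\n"
    "  if x > 0:\n"
    "    x = x - 1\n"
    "  return x\n")
tree = gast.ast_to_gast(ast.parse(source))  # stdlib AST -> gast AST
cfg = CfgBuilder().build_cfg(tree.body[0])  # per the CfgBuilder docstring
print(cfg.entry.value)                      # entry holds the gast.arguments node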
delete/estofadora
estofadora/core/forms.py
Python
mit
1,354
0.00074
# coding: utf-8
from django import forms

from .models import Contact


class ContactForm(forms.ModelForm):

    class Meta:
        model = Contact
        fields = ['name', 'email', 'telephone', 'subject', 'description']

    def clean(self):
        cleaned_data = super(ContactForm, self).clean()
        email = cleaned_data.get('email')
        telephone = cleaned_data.get('telephone')

        if not email and not telephone:
            raise forms.ValidationError(
                'Por favor, entre com pelo menos uma opção de contato. Email ou telefone.'
            )

    def __init__(self, *args, **kwargs):
        super(ContactForm, self).__init__(*args, **kwargs)
        self.fields['name'].widget.attrs.update(
            {'class': 'form-control', 'placeholder': 'Nome completo'}
        )
        self.fields['email'].widget.attrs.update(
            {'class': 'form-control', 'placeholder': 'Seu email'}
        )
        self.fields['telephone'].widget.attrs.update(
            {'class': 'form-control', 'placeholder': 'Seu telefone com DDD'}
        )
        self.fields['subject'].widget.attrs.update(
            {'class': 'form-control', 'placeholder': 'Assunto da mensagem'}
        )
        self.fields['description'].widget.attrs.update(
            {'class': 'form-control', 'placeholder': 'Sua mensagem'}
        )
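A short sketch of how the either/or contact rule in clean() behaves, assuming the Contact model imposes no further constraints (field values are placeholders):

form = ContactForm(data={'name': 'Maria', 'subject': 'Oi',
                         'description': 'Mensagem'})
print(form.is_valid())   # False: neither email nor telephone was given

form = ContactForm(data={'name': 'Maria', 'subject': 'Oi',
                         'description': 'Mensagem',
                         'email': 'maria@example.com'})
print(form.is_valid())   # True: one contact option suffices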
Molecular-Image-Recognition/Molecular-Image-Recognition
code/rmgpy/quantity.py
Python
mit
30,455
0.005845
#!/usr/bin/env python
# encoding: utf-8

################################################################################
#
#   RMG - Reaction Mechanism Generator
#
#   Copyright (c) 2002-2017 Prof. William H. Green (whgreen@mit.edu),
#   Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu)
#
#   Permission is hereby granted, free of charge, to any person obtaining a
#   copy of this software and associated documentation files (the 'Software'),
#   to deal in the Software without restriction, including without limitation
#   the rights to use, copy, modify, merge, publish, distribute, sublicense,
#   and/or sell copies of the Software, and to permit persons to whom the
#   Software is furnished to do so, subject to the following conditions:
#
#   The above copyright notice and this permission notice shall be included in
#   all copies or substantial portions of the Software.
#
#   THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#   FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
#   DEALINGS IN THE SOFTWARE.
#
################################################################################

"""
This module contains classes and methods for working with physical quantities,
particularly the :class:`Quantity` class for representing physical quantities.
"""

import numpy
import quantities as pq

import rmgpy.constants as constants
from rmgpy.exceptions import QuantityError

################################################################################

# Explicitly set the default units to SI
pq.set_default_units('si')

# These units are not defined by the quantities package, but occur frequently
# in data handled by RMG, so we define them manually
pq.UnitQuantity('kilocalories', pq.cal*1e3, symbol='kcal')
pq.UnitQuantity('kilojoules', pq.J*1e3, symbol='kJ')
pq.UnitQuantity('kilomoles', pq.mol*1e3, symbol='kmol')
pq.UnitQuantity('molecule', pq.mol/6.02214179e23, symbol='molecule')
pq.UnitQuantity('molecules', pq.mol/6.02214179e23, symbol='molecules')
pq.UnitQuantity('debye', 1.0/(constants.c*1e21)*pq.C*pq.m, symbol='De')

################################################################################

# Units that should not be used in RMG-Py:
NOT_IMPLEMENTED_UNITS = [
    'degC',
    'C',
    'degF',
    'F',
    'degR',
    'R'
]

################################################################################


class Units(object):
    """
    The :class:`Units` class provides a representation of the units of a
    physical quantity. The attributes are:

    =================== ========================================================
    Attribute           Description
    =================== ========================================================
    `units`             A string representation of the units
    =================== ========================================================

    Functions that return the conversion factors to and from SI units are
    provided.
    """

    # A dict of conversion factors (to SI) for each of the frequent units
    # Here we also define that cm^-1 is not to be converted to m^-1 (or Hz, J, K, etc.)
    conversionFactors = {'cm^-1': 1.0}

    def __init__(self, units=''):
        if units in NOT_IMPLEMENTED_UNITS:
            raise NotImplementedError(
                'The units {} are not yet supported. Please choose SI units.'.format(units)
            )
        self.units = units

    def getConversionFactorToSI(self):
        """
        Return the conversion factor for converting a quantity in a given set
        of `units` to the SI equivalent units.
        """
        try:
            # Process several common units manually for speed
            factor = Units.conversionFactors[self.units]
        except KeyError:
            # Fall back to (slow!) quantities package for less common units
            factor = float(pq.Quantity(1.0, self.units).simplified)
            # Cache the conversion factor so we don't ever need to use
            # quantities to compute it again
            Units.conversionFactors[self.units] = factor
        return factor

    def getConversionFactorFromSI(self):
        """
        Return the conversion factor for converting a quantity to a given set
        of `units` from the SI equivalent units.
        """
        return 1.0 / self.getConversionFactorToSI()

################################################################################


class ScalarQuantity(Units):
    """
    The :class:`ScalarQuantity` class provides a representation of a scalar
    physical quantity, with optional units and uncertainty information. The
    attributes are:

    =================== ========================================================
    Attribute           Description
    =================== ========================================================
    `value`             The numeric value of the quantity in the given units
    `units`             The units the value was specified in
    `uncertainty`       The numeric uncertainty in the value in the given units
                        (unitless if multiplicative)
    `uncertaintyType`   The type of uncertainty: ``'+|-'`` for additive,
                        ``'*|/'`` for multiplicative
    `value_si`          The numeric value of the quantity in the corresponding
                        SI units
    `uncertainty_si`    The numeric value of the uncertainty in the
                        corresponding SI units (unitless if multiplicative)
    =================== ========================================================

    It is often more convenient to perform computations using SI units instead
    of the given units of the quantity. For this reason, the SI equivalent of
    the `value` attribute can be directly accessed using the `value_si`
    attribute. This value is cached on the :class:`ScalarQuantity` object for
    speed.
    """

    def __init__(self, value, units='', uncertainty=None, uncertaintyType='+|-'):
        Units.__init__(self, units)
        self.value = value
        self.uncertaintyType = uncertaintyType
        self.uncertainty = float(uncertainty) if uncertainty is not None else 0.0

    def __reduce__(self):
        """
        Return a tuple of information used to pickle the scalar quantity.
        """
        return (ScalarQuantity, (self.value, self.units, self.uncertainty, self.uncertaintyType))

    def __str__(self):
        """
        Return a string representation of the scalar quantity.
        """
        result = '{0:g}'.format(self.value)
        if self.uncertainty != 0.0:
            result += ' {0} {1:g}'.format(self.uncertaintyType, self.uncertainty)
        if self.units != '':
            result += ' {0}'.format(self.units)
        return result

    def __repr__(self):
        """
        Return a string representation that can be used to reconstruct the
        scalar quantity.
        """
        if self.units == '' and self.uncertainty == 0.0:
            return '{0:g}'.format(self.value)
        else:
            result = '({0:g},{1!r}'.format(self.value, self.units)
            if self.uncertainty != 0.0:
                result += ',{0!r},{1:g}'.format(self.uncertaintyType, self.uncertainty)
            result += ')'
            return result

    def copy(self):
        """
        Return a copy of the quantity.
        """
        return ScalarQuantity(self.value, self.units, self.uncertainty, self.uncertaintyType)

    def getValue(self):
        """
        The numeric value of the quantity, in the given units
        """
        return self.value_si * self.getConversionFactorFromSI()

    def setValue(self, v):
        self.value_si = float(v) * self.getConversionFactorToSI()

    value = property(getVal
jgcobb3/growth-yield-batch
scripts/blm/climate_suitability/get_species_climate_combos.py
Python
bsd-3-clause
1,011
0.004946
#!/bin/python
import sqlite3

import climquery

if __name__ == "__main__":

    # RCPS = ['rcp45', 'rcp60', 'rcp85']
    RCPS = ['rcp45', 'rcp85']
    # CLIMATES = ['CCSM4', 'Ensemble', 'GFDLCM3', 'HadGEM2ES']
    CLIMATES = ['Ensemble']
    # YEARS = ['1990', '2030', '2060', '2090']
    YEARS = ['1990', '2060']

    con = sqlite3.connect('/usr/local/apps/OR_Climate_Grid/Data/orclimgrid.sqlite')
    cur = con.cursor()
    table_query = """PRAGMA table_info(climattrs);"""
    cur.execute(table_query)
    result = cur.fetchall()

    species = []
    for col in result:
        if col[0] > 20:
            species.append(col[1])

    # import ipdb
    # ipdb.set_trace()

    # FOR TESTING
    # species = species[0:1]

    for spec in species:
        for clim in CLIMATES:
            for rcp in RCPS:
                for year in YEARS:
                    print "Querying %s %s %s %s" % (spec, clim, rcp, year)
                    climquery.the_stuff(['climquery.py', clim, rcp, year, spec, 'file'])
vitorfs/cmdbox
cmdbox/snippets/migrations/0004_auto_20160331_0959.py
Python
mit
592
0.001689
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-31 09:59
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('snippets', '0003_auto_20160331_0952'),
    ]

    operations = [
        migrations.AlterField(
            model_name='snippet',
            name='description',
            field=models.TextField(blank=True, help_text='Give a brief description of what this snippet is about. Not required.', max_length=100, null=True, verbose_name='description'),
        ),
    ]
Hemisphere-Project/HPlayer2
profiles/_legacy/gadagne.py
Python
gpl-3.0
696
0
from core.engine import hplayer

# PLAYER
player = hplayer.addplayer('mpv', 'gadagne')

# Interfaces
player.addInterface('osc', 4000, 4001)
player.addInterface('http', 8080)
# player.addInterface('gpio', [16,19,20,21,26])

# GADAGNE logic
defaultFile = 'media0.mp4'
push1File = 'media1.mp4'
push2File = 'media2.mp4'
push3File = 'media3.mp4'

# Loop default file
player.on('end', lambda: player.play(defaultFile))

# HTTP + GPIO events
player.on(['push1', 'gpio20'], lambda: player.play(push1File))
player.on(['push2', 'gpio21'], lambda: player.play(push2File))
player.on(['push3', 'gpio26'], lambda: player.play(push3File))

fails = 5

# RUN
hplayer.setBasePath("/home/pi/Videos/")
hplayer.run()
heltonbiker/MapComplete
PyQt/SlippyMapOriginal.py
Python
mit
8,532
0.003868
#!/usr/bin/env python import sip sip.setapi('QVariant', 2) import math from PyQt4 import QtCore, QtGui, QtNetwork from lib.Point import Point from lib.tileOperations import * TDIM = 256 class LightMaps(QtGui.QWidget): def __init__(self, parent = None): super(LightMaps, self).__init__(parent) self.pressed = False self.snapped = False self._map = SlippyMap(self) self.pressPos = QtCore.QPoint() self.dragPos = QtCore.QPoint() self._map.updated.connect(self.updateMap) def setCenter(self, lat, lng): self._map.latitude = lat self._map.longitude = lng self._map.invalidate() def updateMap(self, r): self.update(r) def resizeEvent(self, event): self._map.width = self.width() self._map.height = self.height() self._map.invalidate() def paintEvent(self, event): p = QtGui.QPainter() p.begin(self) self._map.render(p, event.rect()) p.setPen(QtCore.Qt.black) p.end() def mousePressEvent(self, event): if event.buttons() != QtCore.Qt.LeftButton: return self.pressed = self.snapped = True self.pressPos = self.dragPos = event.pos() def mouseMoveEvent(self, event): if not event.buttons(): return if not self.pressed or not self.snapped: delta = event.pos() - self.pressPos self.pressPos = event.pos() self._map.pan(delta) return else: threshold = 10 delta = event.pos() - self.pressPos if self.snapped: self.snapped &= delta.x() < threshold self.snapped &= delta.y() < threshold self.snapped &= delta.x() > -threshold self.snapped &= delta.y() > -threshold self.dragPos = event.pos() def mouseReleaseEvent(self, event): self.update() def wheelEvent(self, event): delta = event.delta() delta = abs(delta)/delta self._map.change_zoom(delta) self.update(); def keyPressEvent(self, event): if event.key() == QtCore.Qt.Key_Left: self._map.pan(QtCore.QPoint(20, 0)) if event.key() == QtCore.Qt.Key_Right: self._map.pan(QtCore.QPoint(-20, 0)) if event.key() == QtCore.Qt.Key_Up: self._map.pan(QtCore.QPoint(0, 20)) if event.key() == QtCore.Qt.Key_Down: self._map.pan(QtCore.QPoint(0, -20)) if event.key() == QtCore.Qt.Key_Z or event.key() == QtCore.Qt.Key_Select: self.dragPos = QtCore.QPoint(self.width() / 2, self.height() / 2) class SlippyMap(QtCore.QObject): updated = QtCore.pyqtSignal(QtCore.QRect) def __init__(self, parent=None): super(SlippyMap, self).__init__(parent) self._offset = QtCore.QPoint() self._tilesRect = QtCore.QRect() self._tilePixmaps = {} # Point(x, y) to QPixmap mapping self._manager = TileDownloader(self) ##QtNetwork.QNetworkAccessManager() ############# #self._manager.finished.connect(self.handleNetworkData) self._url = QtCore.QUrl() # public vars self.width = 400 self.height = 300 self.zoom = 7 self.latitude = -30 self.longitude = -51.2 self._emptyTile = QtGui.QPixmap(TDIM, TDIM) self._emptyTile.fill(QtCore.Qt.lightGray) ############## ############### def invalidate(self): if self.width <= 0 or self.height <= 0: return print self.latitude, self.longitude, self.zoom tx, ty = tileIndexForCoordinate(self.latitude, self.longitude, self.zoom) # tx = ct.x() # ty = ct.y() # top-left corner of the center tile xp = int(self.width / 2 - (tx - math.floor(tx)) * TDIM) yp = int(self.height / 2 - (ty - math.floor(ty)) * TDIM) # first tile vertical and horizontal xa = (xp + TDIM - 1) / TDIM ya = (yp + TDIM - 1) / TDIM xs = int(tx) - xa ys = int(ty) - ya # offset for top-left tile self._offset = QtCore.QPoint(xp - xa * TDIM, yp - ya * TDIM) # last tile vertical and horizontal xe = int(tx) + (self.width - xp - 1) / TDIM ye = int(ty) + (self.height - yp - 1) / TDIM # build a rect self._tilesRect = QtCore.QRect(xs, ys, xe - xs + 1, ye 
- ys + 1) if self._url.isEmpty(): self._manager.download() self.updated.emit(QtCore.QRect(0, 0, self.width, self.height)) def render(self, painter, rect): for x in range(self._tilesRect.width()): for y in range(self._tilesRect.height()): print x, y tp = Point(x + self._tilesRect.left(), y + self._tilesRect.top()) box = QtCore.QRect(self._manager.tileRect(tp)) if rect.intersects(box): print "Box", box painter.drawPixmap(box, self._tilePixmaps.get(tp, self._emptyTile)) def pan(self, delta): dx = QtCore.QPointF(delta) / float(TDIM) cx, cy = tileIndexForCoordinate(self.latitude, self.longitude, self.zoom) center = QtCore.QPointF(cx, cy) - dx self.latitude = latitudeFromTileY(center.y(), self.zoom) self.longitude = longitudeFromTileX(center.x(), self.zoom) self.invalidate() def change_zoom(self, val): self.zoom = max(1, min(22, self.zoom + val)) print "ZOOM", self.zoom se
lf.invalidate(); ############################ class TileDownloader(QtNetwork.QNetworkAccessManager): updated
= QtCore.pyqtSignal(QtCore.QRect) def __init__(self, parent=None): super(TileDownloader, self).__init__() self.parent = parent cache = QtNetwork.QNetworkDiskCache() cache.setCacheDirectory( QtGui.QDesktopServices.storageLocation (QtGui.QDesktopServices.CacheLocation)) self.setCache(cache) self.finished.connect(self.handleNetworkData) # slots def handleNetworkData(self, reply): img = QtGui.QImage() tp = Point(reply.request().attribute(QtNetwork.QNetworkRequest.User)) url = reply.url() if not reply.error(): if img.load(reply, None): self.parent._tilePixmaps[tp] = QtGui.QPixmap.fromImage(img) reply.deleteLater() self.parent.updated.emit(self.tileRect(tp)) # purge unused tiles bound = self.parent._tilesRect.adjusted(-2, -2, 2, 2) for tp in list(self.parent._tilePixmaps.keys()): if not bound.contains(tp): del self.parent._tilePixmaps[tp] self.download() def download(self): grab = None for x in range(self.parent._tilesRect.width()): for y in range(self.parent._tilesRect.height()): tp = Point(self.parent._tilesRect.topLeft() + QtCore.QPoint(x, y)) if tp not in self.parent._tilePixmaps: grab = QtCore.QPoint(tp) break if grab is None: self._url = QtCore.QUrl() return #path = 'http://tile.openstreetmap.org/%d/%d/%d.png' % (self.zoom, grab.x(), grab.y()) path = 'https://mts2.google.com/vt?lyrs=y&x={0}&y={1}&z={2}'.format(grab.x(), grab.y(), self.parent.zoom) print path self._url = QtCore.QUrl(path) request = QtNetwork.QNetworkRequest() request.setUrl(self._url) request.setRawHeader('User-Agent', 'Nokia (PyQt) Graphics Dojo 1.0') request.setAttribute(QtNetwork.QNetworkRequest.User, grab) self.get(request) ################################ def tileRect(self, tp): t = tp - self.parent._tilesRect.topLeft() x = t.x() * TDIM + self.parent._offset.x() y = t.y() * TDIM + self.parent._offset.y() return QtCore.QRect(x, y, TDIM, TDIM) if __name__ == '__main__': import sys class MapZoom(QtGui.QMainWindow): def __init__(self): super
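The record above is cut off mid-token by the dump, and the helpers it imports from lib.tileOperations are not included. For orientation, a minimal sketch of the standard OSM slippy-map formulas these helpers presumably implement (the bodies below are assumptions matched to the call sites, not the library's actual source):

import math

def tileIndexForCoordinate(latitude, longitude, zoom):
    # Standard slippy-map mapping: fractional tile x/y at this zoom level.
    n = 2.0 ** zoom
    x = (longitude + 180.0) / 360.0 * n
    lat = math.radians(latitude)
    y = (1.0 - math.log(math.tan(lat) + 1.0 / math.cos(lat)) / math.pi) / 2.0 * n
    return x, y

def longitudeFromTileX(x, zoom):
    return x / (2.0 ** zoom) * 360.0 - 180.0

def latitudeFromTileY(y, zoom):
    n = math.pi - 2.0 * math.pi * y / (2.0 ** zoom)
    return math.degrees(math.atan(math.sinh(n)))

With TDIM = 256, SlippyMap.pan() above shifts the fractional tile center by delta / 256 and converts back to latitude/longitude through these inverses.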
xguse/pyrad
pyrad/createfile.py
Python
gpl-3.0
3,821
0.011515
#!/usr/bin/env python2 import sys def main(version): output = """ ==** parameter inputs for pyRAD version %s **======================== affected step == ./ ## 1. Working directory (all) ./*.fastq.gz ## 2. Loc. of non-demultiplexed files (if not line 18) (s1) ./*.barcodes ## 3. Loc. of barcode file (if not line 18) (s1) vsearch ## 4. command (or path) to call vsearch (or usearch) (s3,s6) muscle ## 5. command (or path) to call muscle (s3,s7) TGCAG ## 6. Restriction overhang (e.g., C|TGCAG -> TGCAG) (s1,s2) 2 ## 7. N processors (parallel) (all) 6 ## 8. Mindepth: min coverage for a cluster (s4,s5) 4 ## 9. NQual: max # sites with qual < 20 (or see line 20)(s2) .88 ## 10. Wclust: clustering threshold as a decimal (s3,s6) rad ## 11. Datatype: rad,gbs,pairgbs,pairddrad,(others:see docs)(all) 4 ## 12. MinCov: min sa
mples in a final locus (s7) 3 ## 13. MaxSH: max inds with shared hetero site (s7) c88d6m4p3 ## 14. Prefix name for final output (no spaces) (s7) ==== optional params below this line ==========================
========= affected step == ## 15.opt.: select subset (prefix* only selector) (s2-s7) ## 16.opt.: add-on (outgroup) taxa (list or prefix*) (s6,s7) ## 17.opt.: exclude taxa (list or prefix*) (s7) ## 18.opt.: loc. of de-multiplexed data (s2) ## 19.opt.: maxM: N mismatches in barcodes (def= 1) (s1) ## 20.opt.: phred Qscore offset (def= 33) (s2) ## 21.opt.: filter: def=0=NQual 1=NQual+adapters. 2=strict (s2) ## 22.opt.: a priori E,H (def= 0.001,0.01, if not estimated) (s5) ## 23.opt.: maxN: max Ns in a cons seq (def=5) (s5) ## 24.opt.: maxH: max heterozyg. sites in cons seq (def=5) (s5) ## 25.opt.: ploidy: max alleles in cons seq (def=2;see docs) (s4,s5) ## 26.opt.: maxSNPs: (def=100). Paired (def=100,100) (s7) ## 27.opt.: maxIndels: within-clust,across-clust (def. 3,99) (s3,s7) ## 28.opt.: random number seed (def. 112233) (s3,s6,s7) ## 29.opt.: trim overhang left,right on final loci, def(0,0) (s7) ## 30.opt.: output formats: p,n,a,s,v,u,t,m,k,g,* (see docs) (s7) ## 31.opt.: maj. base call at depth>x<mindepth (def.x=mindepth) (s5) ## 32.opt.: keep trimmed reads (def=0). Enter min length. (s2) ## 33.opt.: max stack size (int), def= max(500,mean+2*SD) (s3) ## 34.opt.: minDerep: exclude dereps with <= N copies, def=1 (s3) ## 35.opt.: use hierarchical clustering (def.=0, 1=yes) (s6) ## 36.opt.: repeat masking (def.=1='dust' method, 0=no) (s3,s6) ## 37.opt.: vsearch max threads per job (def.=6; see docs) (s3,s6) ==== optional: list group/clade assignments below this line (see docs) ==================""" % (version) outfile = open("params.txt",'w') print >>sys.stderr, "\tnew params.txt file created" print >>outfile, "\n".join(output.split("\n")[1:])
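A usage sketch for the record above; the version string is a placeholder, since the real caller (pyRAD's command-line entry point) is not part of this record:

# Hypothetical driver; pyRAD itself supplies its own version string.
if __name__ == "__main__":
    main("3.0.6")  # writes ./params.txt and prints a notice to stderr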
xinghalo/DMInAction
src/spider/BrandSpider/brand1.py
Python
apache-2.0
2,854
0.044697
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib
import urllib2
import re
import json


class Spider:
    def __init__(self):
        self.url = 'http://brand.efu.com.cn/'
        self.user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
        self.headers = {'User-Agent': self.user_agent}

    def getBrandCategory(self):
        # The CSV header and the match strings below stay in Chinese on purpose:
        # they must match the site's own text ('品牌,目标消费群体,分类' is
        # 'brand, target consumer group, category').
        f = open('brand1.csv', 'a')
        f.write('品牌,目标消费群体,分类\n')
        f.close()
        content = self.getPageContext(self.url)
        items = self.resolveIndexContent(content)
        for line in items:
            context = [line[0]]
            # Walk every page number under this category
            url = line[1]
            for num in range(1, 1000):
                nexturl = self.url + url[:-6] + str(num) + ".html"  # build the URL of each page
                pageContent = self.getPageContext(nexturl)  # fetch the paginated listing
                # Stop when this page has no content
                if pageContent.find('<div class="lstPhotob">') == -1:
                    break
                # Process the listing page
                pageItems = self.resolvePageContent(pageContent, context[0])
                if len(pageItems) == 0:
                    break
                for pageLine in pageItems:
                    # print pageLine[0]
                    # print pageLine[1]
                    brandContent = self.getPageContext(pageLine[0])
                    brandItems = self.resolveBrandContext(brandContent)
                    if len(brandItems) == 0:
                        break
                    f = open('brand1.csv', 'a')
                    for brandLine in brandItems:
                        if brandLine[0] == '目标消费群体':  # 'target consumer group'
                            output = str(pageLine[1]) + "," + str(brandLine[1]) + "," + str(line[0])
                            print output
                            f.write(output)
                            f.write("\n")
                            break
                    f.close()

    def resolveBrandContext(self, content):
        # [\s\S]+?
        try:
            pattern = re.compile('.*?<span class="sp-a">(.*?)</span>.*?<span class="sp-b">(.*?)</span>.*?')
            return re.findall(pattern, content)
        except:
            # swallow errors while parsing a brand page
            return []

    def resolveIndexContent(self, content):
        try:
            pattern = re.compile('.*?<li><a title="(.*?)" href="(.*?)">.*?</a></li>.*?')
            return re.findall(pattern, content)
        except:
            # swallow errors while parsing the index page
            return []

    def resolvePageContent(self, content, category):
        # pattern = re.compile('.*?<div class="lstPhotob"><div class="lstPa"><div class="lstPa-a"><a href="(.*?)" target="_blank" title="(.*?)>.*?')
        try:
            # '品牌' means 'brand'; title attributes on the site end with '<category>品牌'
            pattern = re.compile('.*?<a href="(.*?)" target="_blank" title="(.*?)' + category + '品牌">.*?')
            return re.findall(pattern, content)
        except:
            # swallow errors while parsing a listing page
            return []

    def getPageContext(self, url):
        # print 'fetching page', url
        try:
            request = urllib2.Request(url, headers=self.headers)
            response = urllib2.urlopen(request)
            return response.read()
        except:
            pass  # swallow request errors; caller gets None

    def run(self):
        self.getBrandCategory()


spider = Spider()
spider.run()
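A quick check of resolvePageContent's pattern against a fabricated fragment (both the HTML snippet and the '女装' category value are invented for illustration; real input comes from brand.efu.com.cn):

import re

html = '<a href="/brand/123.html" target="_blank" title="SomeBrand女装品牌">'
pattern = re.compile('.*?<a href="(.*?)" target="_blank" title="(.*?)' + '女装' + '品牌">.*?')
print re.findall(pattern, html)  # [('/brand/123.html', 'SomeBrand')]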
brain-tec/server-tools
datetime_formatter/tests/test_best_matcher.py
Python
agpl-3.0
2,584
0
#
Copyright 2015, 2017 Jairo Llopis <jairo.llopis@tecnativa.com> # Copyright 2016 Tecnativa, S.L. - Vicent Cubells # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). from odoo.tests.common import TransactionCase from odoo.exceptions import UserError class BasicCase(TransactionCase): def setUp(self): super()
.setUp() self.langs = ("en_US", "es_ES", "it_IT", "pt_PT", "zh_CN") self.rl = self.env["res.lang"] for lang in self.langs: if not self.rl.search([("code", "=", lang)]): self.rl.load_lang(lang) def test_explicit(self): """When an explicit lang is used.""" for lang in self.langs: self.assertEqual(self.rl.best_match(lang).code, lang) def test_record(self): """When called from a ``res.lang`` record.""" rl = self.rl.with_context(lang="it_IT") rl.env.user.lang = "pt_PT" for lang in self.langs: self.assertEqual( rl.search([("code", "=", lang)]).best_match().code, lang) def test_context(self): """When called with a lang in context.""" self.env.user.lang = "pt_PT" for lang in self.langs: self.assertEqual( self.rl.with_context(lang=lang).best_match().code, lang) def test_user(self): """When lang not specified in context.""" for lang in self.langs: self.env.user.lang = lang # Lang is False in context self.assertEqual( self.rl.with_context(lang=False).best_match().code, lang) # Lang not found in context self.assertEqual( self.rl.with_context(dict()).best_match().code, lang) def test_first_installed(self): """When falling back to first installed language.""" first = self.rl.search([("active", "=", True)], limit=1) self.env.user.lang = False self.assertEqual( self.rl.with_context(lang=False).best_match().code, first.code) def test_unavailable(self): """When matches to an unavailable language.""" self.env.user.lang = False self.rl = self.rl.with_context(lang=False) first = self.rl.search([("active", "=", True)], limit=1) # Safe mode self.assertEqual(self.rl.best_match("fake_LANG").code, first.code) # Unsafe mode with self.assertRaises(UserError): self.rl.best_match("fake_LANG", failure_safe=False)
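The best_match implementation under test is not part of this record; judging only from the assertions above, its resolution order is roughly the following (a speculative sketch, not the addon's actual code):

def best_match(self, lang=None, failure_safe=True):
    # Priority: explicit argument > the res.lang record itself >
    # context lang > user lang > first installed language.
    code = (lang
            or (len(self) == 1 and self.code)
            or self.env.context.get("lang")
            or self.env.user.lang)
    match = self.search([("code", "=", code), ("active", "=", True)], limit=1)
    if not match and not failure_safe:
        raise UserError("Unavailable language: %s" % code)
    return match or self.search([("active", "=", True)], limit=1)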
bjoernricks/kaizen
kaizen/phase/phase.py
Python
gpl-2.0
2,714
0.001474
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80: # kaizen - Continuously improve, build and manage free software # # Copyright (C) 2011 Björn Ricks <bjoern.ricks@gmail.com> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA NONE = "None" DOWNLOADED = "Downloaded" EXTRACTED = "Extracted" PATCHED = "Patched" CONFIGURED = "Configured" BUILT = "Built" DESTROOTED = "Destrooted" ACTIVATED = "Activated" class UnknownPhaseError(Exception): def __init__(self, name): self.name =
name def __str__(self): return "Phase '%s' does not exist." % (self.name) class Phase(object): def __init__(self, name, value):
        self.value = value
        self.name = name

    def __cmp__(self, other):
        if self.value < other.value:
            return -1
        if self.value == other.value:
            return 0
        if self.value > other.value:
            return 1

    def __eq__(self, other):
        if not isinstance(other, Phase):
            return False
        return self.value == other.value

    def __ne__(self, other):  # was misspelled __neq__, a name Python never calls
        return not self.__eq__(other)

    def __hash__(self):
        return self.value

    def __repr__(self):
        return "<Phase name='%s' value='%s' id='%s'>" % (self.name, self.value,
                                                         id(self))


class Phases(object):

    def __init__(self):
        self.phases = dict()
        self.phase_names = [
            NONE,
            DOWNLOADED,
            EXTRACTED,
            PATCHED,
            CONFIGURED,
            BUILT,
            DESTROOTED,
            ACTIVATED,
        ]
        for i, name in enumerate(self.phase_names):
            self.phases[name] = Phase(name, i)

    def get(self, name):
        if name not in self.phases:
            raise UnknownPhaseError(name)
        return self.phases[name]


phases_list = Phases()
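A small usage example for the registry above (Python 2 semantics: the ordering comparisons go through __cmp__):

downloaded = phases_list.get(DOWNLOADED)
built = phases_list.get(BUILT)
assert downloaded < built                 # ordered by position in phase_names
assert phases_list.get("Built") == built  # lookup by name yields the same phase
try:
    phases_list.get("Uploaded")
except UnknownPhaseError as e:
    print e  # Phase 'Uploaded' does not exist.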
caspartse/QQ-Groups-Spider
vendor/pyexcel_io/constants.py
Python
mit
1,834
0.003272
""" pyexcel_io.constants ~~~~~~~~~~~~~~~~~~~ Constants appeared in pyexcel :copyright: (c) 2014-2017 by Onni Software Ltd. :license: New BSD License """ # flake8: noqa DEFAULT_NAME = 'pyexcel' DEFAULT_SHEET_NAME = '%s_sheet1' % DEFAULT_NAME DEFAULT_PLUGIN_NAME = '__%s_io_plugins
__' % DEFAULT_NAME MESSAGE_INVALID_PARAMETERS = "Invalid parameters" MESSAGE_ERROR_02 = "No content, file name. Nothing is given" MESSAGE_ERROR_03 = "cannot handle unknown content" MESSAGE_WRONG_IO_INSTANCE = "Wrong io instance is passed for your file format." MESSAGE_CANNOT_WRITE_STREAM_FORMATTER = "Cannot write content of file type %s to stream" MESSAGE_CANNOT_READ_STREAM_FORMATTER = "Cannot read content of file
type %s from stream" MESSAGE_CANNOT_WRITE_FILE_TYPE_FORMATTER = "Cannot write content of file type %s to file %s" MESSAGE_CANNOT_READ_FILE_TYPE_FORMATTER = "Cannot read content of file type %s from file %s" MESSAGE_LOADING_FORMATTER = "The plugin for file type %s is not installed. Please install %s" MESSAGE_EMPTY_ARRAY = "One empty row is found" MESSAGE_IGNORE_ROW = "One row is ignored" MESSAGE_DB_EXCEPTION = """ Warning: Bulk insertion got below exception. Trying to do it one by one slowly.""" FILE_FORMAT_CSV = 'csv' FILE_FORMAT_TSV = 'tsv' FILE_FORMAT_CSVZ = 'csvz' FILE_FORMAT_TSVZ = 'tsvz' FILE_FORMAT_ODS = 'ods' FILE_FORMAT_XLS = 'xls' FILE_FORMAT_XLSX = 'xlsx' FILE_FORMAT_XLSM = 'xlsm' DB_SQL = 'sql' DB_DJANGO = 'django' KEYWORD_TSV_DIALECT = 'excel-tab' KEYWORD_LINE_TERMINATOR = 'lineterminator' SKIP_DATA = -1 TAKE_DATA = 0 STOP_ITERATION = 1 DEFAULT_MULTI_CSV_SEPARATOR = '__' SEPARATOR_FORMATTER = '---%s---' % DEFAULT_NAME + "%s" SEPARATOR_MATCHER = "---%s:(.*)---" % DEFAULT_NAME DEFAULT_CSV_STREAM_FILE_FORMATTER = ( "---%s:" % DEFAULT_NAME + "%s---%s") DEFAULT_CSV_NEWLINE = '\r\n'
daj0ker/BinPy
BinPy/examples/source/ic/Series_7400/IC7443.py
Python
bsd-3-clause
1,225
0.001633
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>

# <headingcell level=2>
# Usage of IC 7443

# <codecell>
from __future__ import print_function
from BinPy import *

# <codecell>
# Usage of IC 7443:
ic = IC_7443()
print(ic.__doc__)

# <codecell>
# The Pin configuration is:
inp = {8: 0, 12: 0, 13: 1, 14: 0, 15: 1, 16: 1}  # Pin initialization

# Powering up the IC - using -- ic.setIC({14: 1, 7: 0})
ic.setIC({14: 1, 7: 0})

# Setting the inputs of the ic
ic.setIC(inp)

# Draw the IC with the current configuration
ic.drawIC()

# <codecell>
# Run the IC with the current configuration using -- print ic.run() --
# Note that ic.run() returns a dict of pin configuration similar to the inp dict above
print(ic.run())

# <codecell>
# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --
ic.setIC(ic.run())

# Draw the final configuration
ic.drawIC()

# <codecell>
# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --
ic.setIC(ic.run())

# Draw the final configuration
ic.drawIC()

# Run the IC
print(ic.run())

# <codecell>
# Connector Outputs
c = Connector()

# Set the output connector to a particular pin of the ic
ic.setOutput(1, c)
print(c)
LKajan/georef
djangoProject/georef/forms.py
Python
mit
642
0.003115
from django import forms from django.contrib.gis.forms import ModelForm as GeoModelForm,
BaseGeometryWidget from django.forms.models import inlineformset_factory from django.contrib.auth.models import User from georef.models import * class GCPForm(GeoModelForm): class Meta: model = GCP widgets = { 'ground': forms.HiddenInput(),
'image': forms.HiddenInput(), } class KuvaForm(forms.ModelForm): class Meta: model = Kuva fields = ['name', 'kuvaus', 'shootTime', 'shootHeight', 'tyyppi', 'tags'] GCPFormset = inlineformset_factory(Kuva, GCP, form=GCPForm, extra=0)
cgranade/qutip
qutip/tests/test_sparse.py
Python
bsd-3-clause
5,409
0.001294
import numpy as np from numpy.testing import run_module_suite, assert_equal, assert_almost_equal import scipy.sparse as sp from qutip.random_objects import (rand_dm, rand_herm, rand_ket) from
qutip.states import coherent from qutip.sparse import (sp_bandwidth, sp_permute, sp_reverse_permute, sp_profile, sp_one_norm, sp_inf_norm) from qutip.cy.spmath import zcsr_kron def _permu
tateIndexes(array, row_perm, col_perm): return array[np.ix_(row_perm, col_perm)] def _dense_profile(B): row_pro = 0 for i in range(B.shape[0]): j = np.where(B[i, :] != 0)[0] if np.any(j): if j[-1] > i: row_pro += (j[-1]-i) col_pro = 0 for j in range(B.shape[0]): i = np.where(B[:, j] != 0)[0] if np.any(i): if i[-1] > j: col_pro += i[-1]-j ans = (row_pro+col_pro, col_pro, row_pro) return ans def test_sparse_symmetric_permute(): "Sparse: Symmetric Permute" # CSR version A = rand_dm(25, 0.5) perm = np.random.permutation(25) x = sp_permute(A.data, perm, perm).toarray() z = _permutateIndexes(A.full(), perm, perm) assert_equal((x - z).all(), 0) # CSC version B = A.data.tocsc() y = sp_permute(B, perm, perm).toarray() assert_equal((y - z).all(), 0) def test_sparse_nonsymmetric_permute(): "Sparse: Nonsymmetric Permute" # CSR version A = rand_dm(25, 0.5) rperm = np.random.permutation(25) cperm = np.random.permutation(25) x = sp_permute(A.data, rperm, cperm).toarray() z = _permutateIndexes(A.full(), rperm, cperm) assert_equal((x - z).all(), 0) # CSC version B = A.data.tocsc() y = sp_permute(B, rperm, cperm).toarray() assert_equal((y - z).all(), 0) def test_sparse_symmetric_reverse_permute(): "Sparse: Symmetric Reverse Permute" # CSR version A = rand_dm(25, 0.5) perm = np.random.permutation(25) x = sp_permute(A.data, perm, perm) B = sp_reverse_permute(x, perm, perm) assert_equal((A.full() - B.toarray()).all(), 0) # CSC version B = A.data.tocsc() perm = np.random.permutation(25) x = sp_permute(B, perm, perm) B = sp_reverse_permute(x, perm, perm) assert_equal((A.full() - B.toarray()).all(), 0) def test_sparse_nonsymmetric_reverse_permute(): "Sparse: Nonsymmetric Reverse Permute" # CSR square array check A = rand_dm(25, 0.5) rperm = np.random.permutation(25) cperm = np.random.permutation(25) x = sp_permute(A.data, rperm, cperm) B = sp_reverse_permute(x, rperm, cperm) assert_equal((A.full() - B.toarray()).all(), 0) # CSC square array check A = rand_dm(25, 0.5) rperm = np.random.permutation(25) cperm = np.random.permutation(25) B = A.data.tocsc() x = sp_permute(B, rperm, cperm) B = sp_reverse_permute(x, rperm, cperm) assert_equal((A.full() - B.toarray()).all(), 0) # CSR column vector check A = coherent(25, 1) rperm = np.random.permutation(25) x = sp_permute(A.data, rperm, []) B = sp_reverse_permute(x, rperm, []) assert_equal((A.full() - B.toarray()).all(), 0) # CSC column vector check A = coherent(25, 1) rperm = np.random.permutation(25) B = A.data.tocsc() x = sp_permute(B, rperm, []) B = sp_reverse_permute(x, rperm, []) assert_equal((A.full() - B.toarray()).all(), 0) # CSR row vector check A = coherent(25, 1).dag() cperm = np.random.permutation(25) x = sp_permute(A.data, [], cperm) B = sp_reverse_permute(x, [], cperm) assert_equal((A.full() - B.toarray()).all(), 0) # CSC row vector check A = coherent(25, 1).dag() cperm = np.random.permutation(25) B = A.data.tocsc() x = sp_permute(B, [], cperm) B = sp_reverse_permute(x, [], cperm) assert_equal((A.full() - B.toarray()).all(), 0) def test_sp_bandwidth(): "Sparse: Bandwidth" for kk in range(10): A = sp.rand(100, 100, density=0.1, format='csr') ans1 = sp_bandwidth(A) A = A.toarray() i, j = np.nonzero(A) ans2 = ((j-i).max()+(i-j).max()+1, (i-j).max(), (j-i).max()) assert_equal(ans1, ans2) for kk in range(10): A = sp.rand(100, 100, density=0.1, format='csc') ans1 = sp_bandwidth(A) A = A.toarray() i, j = np.nonzero(A) ans2 = ((j-i).max()+(i-j).max()+1, (i-j).max(), (j-i).max()) assert_equal(ans1, ans2) def test_sp_profile(): "Sparse: Profile" for kk in range(10): 
A = sp.rand(1000, 1000, 0.1, format='csr') pro = sp_profile(A) B = A.toarray() ans = _dense_profile(B) assert_equal(pro, ans) for kk in range(10): A = sp.rand(1000, 1000, 0.1, format='csc') pro = sp_profile(A) B = A.toarray() ans = _dense_profile(B) assert_equal(pro, ans) def test_sp_one_norm(): "Sparse: one-norm" for kk in range(10): H = rand_herm(100,0.1).data nrm = sp_one_norm(H) ans = max(abs(H).sum(axis=0).flat) assert_almost_equal(nrm,ans) def test_sp_inf_norm(): "Sparse: inf-norm" for kk in range(10): H = rand_herm(100,0.1).data nrm = sp_inf_norm(H) ans = max(abs(H).sum(axis=1).flat) assert_almost_equal(nrm,ans) if __name__ == "__main__": run_module_suite()
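A tiny worked example of the bandwidth convention the tests above rely on:

import numpy as np

A = np.array([[1, 1, 0],
              [0, 1, 0],
              [0, 0, 1]])
i, j = np.nonzero(A)
# (total, lower, upper) == ((j-i).max() + (i-j).max() + 1, (i-j).max(), (j-i).max())
print((j - i).max() + (i - j).max() + 1, (i - j).max(), (j - i).max())  # 2 0 1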
factorlibre/odoo-addons-cpo
product_average_consumption_rules/model/res_config.py
Python
agpl-3.0
1,571
0
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl). # © 2018 FactorLibre - Álvaro Marcos <alvaro.marcos@factorlibre.com> from openerp import api, models, fields class ResConfig(models.TransientModel): _inherit = 'res.config.s
ettings' initial_date = fields.Date('Initial date') end_date = fields.Date('End date') added_locations = fields.Many2many( 'stock.location', string='Added locations to consumption') apply_to_calculation = fields.Boo
lean("Apply to calculation")

    @api.multi
    def set_values(self):
        super(ResConfig, self).set_values()
        Sudo = self.env['ir.config_parameter'].sudo()
        Sudo.set_param('initial_date', self.initial_date)
        Sudo.set_param('end_date', self.end_date)
        Sudo.set_param('added_locations', self.added_locations.ids)
        Sudo.set_param('apply_to_calculation', self.apply_to_calculation)

    @api.model
    def get_values(self):
        res = super(ResConfig, self).get_values()
        Sudo = self.env['ir.config_parameter'].sudo()
        initial_date = Sudo.get_param('initial_date')
        end_date = Sudo.get_param('end_date')  # was fetched twice; once is enough
        added_locations = Sudo.get_param('added_locations')
        apply_to_calculation = Sudo.get_param('apply_to_calculation')
        res.update({
            'initial_date': initial_date,
            'end_date': end_date,
            'added_locations': [(6, 0, added_locations)],
            'apply_to_calculation': apply_to_calculation
        })
        return res
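One caveat worth noting about the record above: ir.config_parameter persists every value as text, so the many2many ids saved in set_values come back as a string, not a list. A hedged sketch of a safer read (the '[]' default is an assumption, not part of the addon):

from ast import literal_eval

raw = Sudo.get_param('added_locations', '[]')  # e.g. the string "[1, 2, 3]"
added_location_ids = literal_eval(raw)         # back to a real list of ids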
Scalr/libcloud
libcloud/extra/drivers/google.py
Python
apache-2.0
6,824
0.002638
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Module for Google Big Data Drivers. """ from libcloud.extra.drivers.google_bq_utils import QueryJob from libcloud.common.google import GoogleAuthType, GoogleBaseConnection from libcloud.common.base import BaseDriver API_VERSION = 'v2' class BQConnection(GoogleBaseConnection): """ Connection class for the BQ driver. """ def __init__(self, user_id, key, secure=None, auth_type=None, credential_file=None, **kwargs): project = kwargs.pop('project') super(BQConnection, self).__init__(user_id, key, secure=secure, auth_type=auth_type, credential_file=credential_file, **kwargs) self.request_path = '/bigquery/%s/projects/%s' % (API_VERSION, project) class BigQuery(BaseDriver): """ Google Big Query client """ connectionCls = BQConnection api_name = 'google' name = 'Big Query' default_scopes = ['https://www.googleapis.com/auth/bigquery', 'https://www.googleapis.com/auth/bigquery.insertdata', 'https://www.googleapis.com/auth/cloud-platform.read-only', 'https://www.googleapis.com/auth/devstorage.full_control', 'https://www.googleapis.com/auth/devstorage.read_only', 'https://www.googleapis.com/auth/devstorage.read_write'] def __init__(self, user_id, key, project, **kwargs): """ :param user_id: The email address (for service accounts) or Client ID (for installed apps) to be used for authentication. :type user_id: ``str`` :param key: The RSA Key (for service accounts) or file path containing key or Client Secret (for installed apps) to be used for authentication. :type key: ``str`` :keyword project: Your project name. (required) :type project: ``str`` :keyword auth_type: Accepted values are "SA" or "IA" or "GCE" ("Service Account" or "Installed Application" or "GCE" if libcloud is being used on a GCE instance with service account enabled). If not supplied, auth_type will be guessed based on value of user_id or if the code is being executed in a GCE instance. :type auth_type: ``str`` :keyword scopes: List of authorization URLs. Default is empty and grants read/write to Compute, Storage, DNS. :type scopes: ``list`` """ self.project = project if 'auth_type' not in kwargs: kwargs['auth_type'] = GoogleAuthType.SA self.scopes = kwargs.get('scopes', self.default_scopes) super(BigQuery, self).__init__(user_id, key, **kwargs) def _ex_connection_class_kwargs(self): """ Add extra parameters to auth request """ res = super(BigQuery, self)._ex_connection_class_kwargs() res['project'] = self.project res['scopes'] = self.scopes return res def list_datasets(self): """ Get list of datasets Api reference: https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list :return: list of dicts. 
Each dict contains two keys 'datasetId' and 'projectId' """ request = '/datasets' response = self.connection.request(request, method='GET').object return [l['datasetReference'] for l in response['datasets']] def list_tables(self, dataset_id): """ Get list of tables for dataset Api reference: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list :param dataset_id: str. Id of dataset. :return: list of dicts. Each dict contains next keys 'datasetId', 'projectId' and 'tableId' """ request = '/datasets/%s/tables' % dataset_id response = self.connection.request(request, method='GET').object return [l['tableReference'] for l in response['tables']] def query(self, query, max_results=50000, timeout_ms=60000, use_legacy_sql=F
alse): """ Execute query and return result. Result will be chunked. Reference: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/query :param query: str. BQ query. Example: SELECT * FROM {billing_table} LIMIT 1 :param max_results: int. Page size :param timeout_ms: int. Max execution time. Default 1 min :param use_legacy_sql: bool. Specifie
s whether to use BigQuery's legacy SQL dialect for this query. :return: dict which represent row from result """ request = '/queries' data = {'query': query, 'useLegacySql': use_legacy_sql, 'maxResults': max_results, 'timeoutMs': timeout_ms} response = self.connection.request(request, method='POST', data=data).object query_job = QueryJob(response) return self._get_job_results(query_job, max_results, timeout_ms) def _get_job_results(self, query_job, max_results, timeout_ms): """ Deal with paginated QueryJob results Reference: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/getQueryResults :param query_job: query job object :return: generator over rows """ while True: for row in query_job.rows: yield row if not query_job.page_token: # last page break # next request data = { 'maxResults': max_results, 'pageToken': query_job.page_token, 'timeoutMs': timeout_ms } request = '/queries/' + query_job.job_id response = self.connection.request(request, method='GET', params=data).object query_job = QueryJob(response)
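A usage sketch for the driver above; the service-account email, key path, and project id are placeholders:

bq = BigQuery('svc-account@my-project.iam.gserviceaccount.com',
              '/path/to/key.json', project='my-project')
print(bq.list_datasets())
# query() returns a generator that pages through results transparently
for row in bq.query('SELECT 1 AS one', max_results=10):
    print(row)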
msampathkumar/rms
app/old_views.py
Python
apache-2.0
709
0.011283
from flask import render_template from flask.ext.appbuilder.models.sqla.interface import SQLAInterface from flask.ext.appbuilder im
port ModelView from app import appbuilder, db """ Create your Views:: class MyModelView(ModelView): datamodel = SQLAInterface(MyModel) Next, register your Views:: appbuilder.add_view(MyModelView, "My View", icon="fa-folder-open-o", category="My Category", category_icon='fa-envelope') """ """ Application wide 404 error handler """ @appbuilder.app.errorhandler(404) def page_not_found(e): return render_template('404.html',\ base_temp
late=appbuilder.base_template, \ appbuilder=appbuilder), 404 db.create_all()
robe16/kiosk.grandparent-message-board
src/config/cfg.py
Python
gpl-3.0
3,338
0.004793
import json import os import ast from datetime import datetime def get_json(): with open(os.path.join(os.path.dirname(__file__), 'config.json'), 'r') as data_file: data = json.load(data_file) return data def put_json(new_data): try: # try: new_data = ast.literal_eval(new_data) except: new_data = new_data # with open(os.path.join(os.path.dirname(__file__), 'config.json'), 'w+') as output_file: output_file.write(json.dumps(new_data, indent=4, separators=(',', ': '))) output_file.close() # return True except Exception as e: return False ################################################################################################ def get_cfg_json(): data = get_json() return data['config'] ################################################################################################ # General ################################################################################################ def get_config_general(): data = get_cfg_json() return data['general'] def get_config_general_title(): data = get_config_general() return data['title'] ################################################################################################ # Weather ################################################################################################ def get_config_weather(): data = get_cfg_json() return data['weather'] def get_config_weather_metoffice_appkey(): data = get_config_weather() return data['metoffice_appkey'] def get_config_weather_town(): data = get_config_weather() return data['cfg_town'] ################################################################################################ # Google ################################################################################################ def get_config_google(): data = get_cfg_json() return data['google'] def get_config_google_googlesheet(): data = get_config_google() return data['google_sheet'] def get_config_google_googlesheetId(): data = get_config_google_googlesheet() return data['google_sheetId'] def get_config_google_googlesheetRange(): data = get_config_google_googlesheet() return data['google_sheetRange'] #####################################################################
########################### # Axiscare ################################################################################################ def get_config_axiscare(): data = get_cfg_json() return data['axiscare'] def get_config_axiscare_url(): data = get_config_axiscare() return data['
url']

def get_config_axiscare_date():
    data = get_config_axiscare()
    return data['dateReceived']

def put_config_axiscare_url(url):
    # Operate on the full JSON document, not the 'axiscare' subtree returned
    # by get_config_axiscare(), so the nested keys below exist and the
    # write round-trips through put_json().
    data = get_json()
    data['config']['axiscare']['dateReceived'] = datetime.now().strftime('%Y-%m-%d')
    data['config']['axiscare']['url'] = url
    put_json(data)

################################################################################################
# Email safelist
################################################################################################

def get_config_emailsafelist():
    data = get_cfg_json()
    return data['email_safelist']
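Reading the accessors back, the config.json they expect looks roughly like this (a sketch with placeholder values inferred from the getters, not the project's real file):

# Shape of config.json implied by the accessors above (placeholder values):
EXAMPLE_CONFIG = {
    "config": {
        "general": {"title": "..."},
        "weather": {"metoffice_appkey": "...", "cfg_town": "..."},
        "google": {
            "google_sheet": {"google_sheetId": "...", "google_sheetRange": "..."}
        },
        "axiscare": {"url": "...", "dateReceived": "2017-01-01"},
        "email_safelist": ["someone@example.com"]
    }
}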
ProfessorX/Config
.PyCharm30/system/python_stubs/-1247972723/samba/dcerpc/drsuapi/DsReplicaNeighbourCtr.py
Python
gpl-2.0
866
0.008083
# encoding: utf-8 # module samba.dcerpc.drsuapi # from /usr/lib/python2.7/dist-packages/samba/dcerpc/drsuapi.so # by generator 1.135 """ drsuapi DCE/RPC """ # imports import dcerpc as __dcerpc import talloc as __talloc class DsReplicaNeighbourCtr(__talloc.Object): # no doc def __init__(self, *args, **kwargs): #
real signature unknown pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass array = property(lambda self: object(), lambda self, v: Non
e, lambda self: None) # default count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default reserved = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
scheib/chromium
third_party/tlslite/tlslite/utils/cryptomath.py
Python
bsd-3-clause
8,434
0.010197
# Authors: # Trevor Perrin # Martin von Loewis - python 3 port # Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2 # # See the LICENSE file for legal information regarding use of this file. """cryptomath module This module has basic math/crypto code.""" from __future__ import print_function import os import math import base64 import binascii from .compat import * # ************************************************************************** # Load Optional Modules # ************************************************************************** # Try to load M2Crypto/OpenSSL try: from M2Crypto import m2 m2cryptoLoaded = True except ImportError: m2cryptoLoaded = False #Try to load GMPY try: import gmpy gmpyLoaded = True except ImportError: gmpyLoaded = False #Try to load pycrypto try: import Crypto.Cipher.AES pycryptoLoaded = True except ImportError: pycryptoLoaded = False # ************************************************************************** # PRNG Functions # ************************************************************************** # Check that os.urandom works import zlib length = len(zlib.compress(os.urandom(1000))) assert(length > 900) def getRandomBytes(howMany): b = bytearray(os.urandom(howMany)) assert(len(b) == howMany) return b prngName = "os.urandom" # ************************************************************************** # Simple hash functions # ************************************************************************** import hmac import hashlib def MD5(b): return bytearray(hashlib.md5(compat26Str(b)).digest()) def SHA1(b): return bytearray(hashlib.sha1(compat26Str(b)).digest()) def SHA256(b): return bytearray(hashlib.sha256(compat26Str(b)).digest()) def HMAC_MD5(k, b): k = compatHMAC(k) b = compatHMAC(b) return bytearray(hmac.new(k, b, hashlib.md5).digest()) def HMAC_SHA1(k, b): k = compatHMAC(k) b = compatHMAC(b) return bytearray(hmac.new(k, b, hashlib.sha1).digest()) def HMAC_SHA256(k, b): k = compatHMAC(k) b = compatHMAC(b) return bytearray(hmac.new(k, b, hashlib.sha256).digest()) # ************************************************************************** # Converter Functions # ************************************************************************** def bytesToNumber(b): total = 0 multiplier = 1 for count in range(len(b)-1, -1, -1): byte = b[count] total += multiplier * byte multiplier *= 256 return total def numberToByteArray(n, howManyBytes=None): """Convert an integer into a bytearray, zero-pad to howManyBytes. The returned bytearray may be smaller than howManyBytes, but will not be larger. The returned bytearray will contain a big-endian encoding of the input integer (n). """ if howManyBytes == None: howManyBytes = numBytes(n) b = bytearray(howManyBytes) for count in range(howManyBytes-1, -1, -1): b[count] = int(n % 256) n >>= 8 return b def mpiToNumber(mpi): #mpi is an openssl-format bignum string if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number raise AssertionError() b = bytearray(mpi[4:]) return bytesToNumber(b) def numberToMPI(n): b = numberToByteArray(n) ext = 0 #If the high-order bit is going to be set, #add an extra byte of zeros if (numBits(n) & 0x7)==0: ext = 1 length = numBytes(n) + ext b = bytearray(4+ext) + b b[0] = (length >> 24) & 0xFF b[1] = (length >> 16) & 0xFF b[2] = (length >> 8) & 0xFF b[3] = length & 0xFF return bytes(b) # ************************************************************************** # Misc. 
Utility Functions # ************************************************************************** def numBits(n): if n==0: return 0 s = "%x" % n return ((len(s)-1)*4) + \ {'0':0, '1':1, '2':2, '3':2, '4':3, '5':3, '6':3, '7':3, '8':4, '9':4, 'a':4, 'b':4, 'c':4, 'd':4, 'e':4, 'f':4, }[s[0]] return int(math.floor(math.log(n, 2))+1) def numBytes(n): if n==0: return 0 bits = numBits(n) return int(math.ceil(bits / 8.0)) # ************************************************************************** # Big Number Math # ************************************************************************** def getRandomNumber(low, high): if low >= high: raise AssertionError() howManyBits = numBits(high) howManyBytes = numBytes(high) lastBits = howManyBits % 8 while 1: bytes = getRandomBytes(howManyBytes) if lastBits: bytes[0] = bytes[0] % (1 << lastBits) n = bytesToNumber(bytes) if n >= low and n < high: return n def gcd(a,b): a, b = max(a,b), min(a,b) while b: a, b = b, a % b return a def lcm(a, b): return (a * b) // gcd(a, b) #Returns inverse of a mod b, zero if none #Uses Extended Euclidean Algorithm def invMod(a, b): c, d = a, b uc, ud = 1, 0 while c != 0: q = d // c c, d = d-(q*c), c uc, ud = ud - (q * uc), uc if d == 1: return ud % b return 0 if gmpyLoaded: def powMod(base, power, modulus): base = gmpy.mpz(base) power = gmpy.mpz(power) modulus = gmpy.mpz(modulus) result = pow(base, power, modulus) return long(result) else: def powMod(base, power, modulus): if power < 0: result = pow(base, power*-1, modulus) result = invMod(result, modulus) return result else: return pow(base, power, modulus) #Pre-calculate a sieve of the ~100 primes < 1000: def makeSieve(n): sieve = list(range(n)) for count in range(2, int(math.sqrt(n))+1): if sieve[count] == 0: continue x = sieve[count] * 2 while x < len(sieve): sieve[x] = 0 x += sieve[coun
t] sieve = [x for x in sieve[2:] if x] return sieve sieve = makeSieve(1000) def isPrime(n, iterations=5, display=False): #Trial division with sieve for x in sieve: if x >= n: return True if n % x == 0: return False #Passed trial divisi
on, proceed to Rabin-Miller #Rabin-Miller implemented per Ferguson & Schneier #Compute s, t for Rabin-Miller if display: print("*", end=' ') s, t = n-1, 0 while s % 2 == 0: s, t = s//2, t+1 #Repeat Rabin-Miller x times a = 2 #Use 2 as a base for first iteration speedup, per HAC for count in range(iterations): v = powMod(a, s, n) if v==1: continue i = 0 while v != n-1: if i == t-1: return False else: v, i = powMod(v, 2, n), i+1 a = getRandomNumber(2, n) return True def getRandomPrime(bits, display=False): if bits < 10: raise AssertionError() #The 1.5 ensures the 2 MSBs are set #Thus, when used for p,q in RSA, n will have its MSB set # #Since 30 is lcm(2,3,5), we'll set our test numbers to #29 % 30 and keep them there low = ((2 ** (bits-1)) * 3) // 2 high = 2 ** bits - 30 p = getRandomNumber(low, high) p += 29 - (p % 30) while 1: if display: print(".", end=' ') p += 30 if p >= high: p = getRandomNumber(low, high) p += 29 - (p % 30) if isPrime(p, display=display): return p #Unused at the moment... def getRandomSafePrime(bits, display=False): if bits < 10: raise AssertionError() #The 1.5 ensures the 2 MSBs are set #Thus, when used for p,q in RSA, n will have its MSB set # #Since 30 is lcm(2,3,5), we'll set our test numbers to #29 % 30 and keep them there low = (2 ** (bits-2)) * 3//2 high = (2 ** (bits-1)) - 30 q = getRandomNumber(low, high) q += 29 - (q % 30) while 1: if display: print(".", end=' ') q += 30 if (q >= high): q = getRandomNumber(low, high) q += 29 - (q % 30) #Ideas from Tom Wu's SRP c
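The record above is truncated mid-comment, but the number-theory helpers are complete; a few sanity checks on them:

assert numBits(255) == 8 and numBits(256) == 9
assert numBytes(256) == 2
assert bytesToNumber(numberToByteArray(123456789)) == 123456789

x = invMod(7, 40)              # extended Euclid: 7*23 == 161 == 4*40 + 1
assert x == 23 and (7 * x) % 40 == 1
assert isPrime(2**61 - 1)      # a Mersenne prime passes trial division + Rabin-Miller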
kyokley/MediaViewer
mediaviewer/tests/views/test_signout.py
Python
mit
1,454
0
import mock from django.test import TestCase from mediaviewer.views.signout import signout class TestSignout(TestCase): def setUp(self): self.logout_patcher = mock.patch('mediaviewer.views.signout.logout') self.mock_logout = self.logout_patcher.start() self.addCleanup(self.logout_patcher.stop) self.setSiteWideContext_patcher = mock.patch( 'mediaviewer.vi
ews.signout.setSiteWideContext') self.mock_setSiteWideContext = self.setSiteWideContext_patcher.start()
self.addCleanup(self.setSiteWideContext_patcher.stop) self.render_patcher = mock.patch('mediaviewer.views.signout.render') self.mock_render = self.render_patcher.start() self.addCleanup(self.render_patcher.stop) self.request = mock.MagicMock() def test_signout(self): expected_context = {'active_page': 'logout', 'loggedin': False, 'title': 'Signed out'} expected = self.mock_render.return_value actual = signout(self.request) self.assertEqual(expected, actual) self.mock_logout.assert_called_once_with(self.request) self.mock_setSiteWideContext.assert_called_once_with( expected_context, self.request) self.mock_render.assert_called_once_with( self.request, 'mediaviewer/logout.html', expected_context)
naturali/tensorflow
tensorflow/contrib/framework/python/ops/variables.py
Python
apache-2.0
22,660
0.005825
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Variable functions. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.framework.python.ops import add_arg_scope as contrib_add_arg_scope from tensorflow.python import pywrap_tensorflow from tensorflow.python.framework import device as tf_device from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import saver as tf_saver __all__ = ['add_model_variable', 'assert_global_step', 'assert_or_get_global_step', 'assign_from_checkpoint', 'assign_from_checkpoint_fn', 'assign_from_values', 'assign_from_values_fn', 'create_global_step', 'get_global_step', 'get_or_create_global_step', 'get_local_variables', 'get_model_variables', 'get_unique_variable', 'get_variables_by_name', 'get_variables_by_suffix', 'get_variables_to_restore', 'get_variables', 'local_variable', 'model_variable', 'variable', 'VariableDeviceChooser'] def assert_global_step(global_step_tensor): """Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`. Args: global_step_tensor: `Tensor` to test. """ if not (isinstance(global_step_tensor, variables.Variable) or isinstance(global_step_tensor, ops.Tensor)): raise TypeError('Existing "global_step" must be a Variable or Tensor.') if not global_step_tensor.dtype.base_dtype.is_integer: raise TypeError( 'Existing "global_step" does not have integer type: %s' % global_step_tensor.dtype) if global_step_tensor.get_shape().ndims != 0: raise TypeError( 'Existing "global_step" is not scalar: %s' % global_step_tensor.get_shape()) def assert_or_get_global_step(graph=None, global_step_tensor=None): """Verifies that a global step tensor is valid or gets one if None is given. If `global_step_tensor` is not None, check that it is a valid global step tensor (using `assert_global_step`). Otherwise find a global step tensor using `get_global_step` and return it. Args: graph: The graph to find the global step tensor for. global_step_tensor: The tensor
to check for suitability as a global step. If
None is given (the default), find a global step tensor. Returns: A tensor suitable as a global step, or `None` if none was provided and none was found. """ if global_step_tensor is None: # Get the global step tensor the same way the supervisor would. global_step_tensor = get_global_step(graph) else: assert_global_step(global_step_tensor) return global_step_tensor # TODO(ptucker): Change supervisor to use this when it's migrated to core. def get_global_step(graph=None): """Get the global step tensor. The global step tensor must be an integer variable. We first try to find it in the collection `GLOBAL_STEP`, or by name `global_step:0`. Args: graph: The graph to find the global step in. If missing, use default graph. Returns: The global step variable, or `None` if none was found. Raises: TypeError: If the global step tensor has a non-integer type, or if it is not a `Variable`. """ graph = ops.get_default_graph() if graph is None else graph global_step_tensor = None global_step_tensors = graph.get_collection(ops.GraphKeys.GLOBAL_STEP) if len(global_step_tensors) == 1: global_step_tensor = global_step_tensors[0] elif not global_step_tensors: try: global_step_tensor = graph.get_tensor_by_name('global_step:0') except KeyError: return None else: logging.error('Multiple tensors in global_step collection.') return None assert_global_step(global_step_tensor) return global_step_tensor def create_global_step(graph=None): """Create global step tensor in graph. Args: graph: The graph in which to create the global step. If missing, use default graph. Returns: Global step tensor. Raises: ValueError: if global step key is already defined. """ graph = ops.get_default_graph() if graph is None else graph if get_global_step(graph) is not None: raise ValueError('"global_step" already exists.') # Create in proper graph and base name_scope. with graph.as_default() as g, g.name_scope(None): collections = [ops.GraphKeys.VARIABLES, ops.GraphKeys.GLOBAL_STEP] return variable(ops.GraphKeys.GLOBAL_STEP, shape=[], dtype=dtypes.int64, initializer=init_ops.zeros_initializer, trainable=False, collections=collections) def get_or_create_global_step(graph=None): """Returns and create (if necessary) the global step variable. Args: graph: The graph in which to create the global step. If missing, use default graph. Returns: the tensor representing the global step variable. """ graph = ops.get_default_graph() if graph is None else graph globalstep = get_global_step(graph) if globalstep is None: globalstep = create_global_step(graph) return globalstep def local_variable(initial_value, validate_shape=True, name=None): """Create variable and add it to `GraphKeys.LOCAL_VARIABLES` collection. Args: initial_value: See variables.Variable.__init__. validate_shape: See variables.Variable.__init__. name: See variables.Variable.__init__. Returns: New variable. """ return variables.Variable( initial_value, trainable=False, collections=[ops.GraphKeys.LOCAL_VARIABLES], validate_shape=validate_shape, name=name) @contrib_add_arg_scope def variable(name, shape=None, dtype=None, initializer=None, regularizer=None, trainable=True, collections=None, caching_device=None, device=None): """Gets an existing variable with these parameters or creates a new one. Args: name: the name of the new or existing variable. shape: shape of the new or existing variable. dtype: type of the new or existing variable (defaults to `DT_FLOAT`). initializer: initializer for the variable if one is created. 
regularizer: a (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection GraphKeys.REGULARIZATION_LOSSES and can be used for regularization. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable). collections: A list of collection names to which the Variable will be added. If None it would default to tf.GraphKeys.VARIABLES. caching_device: Optional device string or function describing where the Variable should be cached for reading. Defaults to the Variable's device. device: Optional device to place the variable. It can be an string or a function that is called to get the device for the variable. Returns: The created or existing variable. """ collections = list(collections or [ops.GraphKeys.VARIABLES]) # Remove duplicates collecti
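This record is also cut off mid-word, but its global-step helpers are complete. Era-appropriate usage looked roughly like this (a sketch against this TF 0.x contrib API):

with ops.Graph().as_default() as g:
    step = get_or_create_global_step(g)  # creates global_step:0 the first time
    assert get_global_step(g) is step    # later lookups resolve to the same variable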
brahmastra2016/bleachbit
bleachbit/GUI.py
Python
gpl-3.0
36,930
0.000948
#!/usr/bin/env python # vim: ts=4:sw=4:expandtab # BleachBit # Copyright (C) 2008-2017 Andrew Ziem # https://www.bleachbit.org # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import, print_function import bleachbit from bleachbit.Cleaner import backends, register_cleaners from bleachbit.GuiPreferences import PreferencesDialog from bleachbit.Options import options from bleachbit import _, _p, APP_NAME, appicon_path, portable_mode from bleachbit import Cleaner, FileUtilities from bleachbit import GuiBasic import logging import os import sys import threading import time import types import warnings warnings.simplefilter('error') import pygtk pygtk.require('2.0') import gtk import gobject warnings.simplefilter('default') if 'nt' == os.name: from bleachbit import Windows logger = logging.getLogger(__name__) def threaded(func): """Decoration to create a threaded function""" def wrapper(*args):
thread = threadin
g.Thread(target=func, args=args) thread.start() return wrapper class TreeInfoModel: """Model holds information to be displayed in the tree view""" def __init__(self): self.tree_store = gtk.TreeStore( gobject.TYPE_STRING, gobject.TYPE_BOOLEAN, gobject.TYPE_PYOBJECT, gobject.TYPE_STRING) if None == self.tree_store: raise Exception("cannot create tree store") self.row_changed_handler_id = None self.refresh_rows() self.tree_store.set_sort_func(3, self.sort_func) self.tree_store.set_sort_column_id(3, gtk.SORT_ASCENDING) def get_model(self): """Return the tree store""" return self.tree_store def on_row_changed(self, __treemodel, path, __iter): """Event handler for when a row changes""" parent = self.tree_store[path[0]][2] child = None if 2 == len(path): child = self.tree_store[path][2] value = self.tree_store[path][1] options.set_tree(parent, child, value) def refresh_rows(self): """Clear rows (cleaners) and add them fresh""" if None != self.row_changed_handler_id: self.tree_store.disconnect(self.row_changed_handler_id) self.tree_store.clear() for key in sorted(backends): if not any(backends[key].get_options()): # localizations has no options, so it should be hidden # https://github.com/az0/bleachbit/issues/110 continue c_name = backends[key].get_name() c_id = backends[key].get_id() c_value = options.get_tree(c_id, None) if not c_value and options.get('auto_hide') and backends[key].auto_hide(): logger.debug("automatically hiding cleaner '%s'", c_id) continue parent = self.tree_store.append(None, (c_name, c_value, c_id, "")) for (o_id, o_name) in backends[key].get_options(): o_value = options.get_tree(c_id, o_id) self.tree_store.append(parent, (o_name, o_value, o_id, "")) self.row_changed_handler_id = self.tree_store.connect("row-changed", self.on_row_changed) def sort_func(self, model, iter1, iter2): """Sort the tree by the display name""" s1 = model[iter1][0].lower() s2 = model[iter2][0].lower() if s1 == s2: return 0 if s1 > s2: return 1 return -1 class TreeDisplayModel: """Displays the info model in a view""" def make_view(self, model, parent, context_menu_event): """Create and return a TreeView object""" self.view = gtk.TreeView(model) # listen for right click (context menu) self.view.connect("button_press_event", context_menu_event) # first column self.renderer0 = gtk.CellRendererText() self.column0 = gtk.TreeViewColumn(_("Name"), self.renderer0, text=0) self.view.append_column(self.column0) self.view.set_search_column(0) # second column self.renderer1 = gtk.CellRendererToggle() self.renderer1.set_property('activatable', True) self.renderer1.connect('toggled', self.col1_toggled_cb, model, parent) self.column1 = gtk.TreeViewColumn(_("Active"), self.renderer1) self.column1.add_attribute(self.renderer1, "active", 1) self.view.append_column(self.column1) # third column self.renderer2 = gtk.CellRendererText() if hasattr(self.renderer2, 'set_alignment'): # requires PyGTK 2.22 # http://www.pygtk.org/pygtk2reference/class-gtkcellrenderer.html#method-gtkcellrenderer--set-alignment self.renderer2.set_alignment(1.0, 0.0) # TRANSLATORS: Size is the label for the column that shows how # much space an option would clean or did clean self.column2 = gtk.TreeViewColumn(_("Size"), self.renderer2, text=3) self.column2.set_alignment(1.0) self.view.append_column(self.column2) # finish self.view.expand_all() return self.view def set_cleaner(self, path, model, parent_window, value=None): """Activate or deactive option of cleaner.""" if None == value: # if not value given, toggle current value value = not 
model[path][1] assert(type(value) is types.BooleanType) assert(type(model) is gtk.TreeStore) cleaner_id = None i = path if type(i) is str: # type is either str or gtk.TreeIter i = model.get_iter(path) parent = model.iter_parent(i) if None != parent: # this is an option (child), not a cleaner (parent) cleaner_id = model[parent][2] option_id = model[path][2] if cleaner_id and value: # when toggling an option, present any warnings warning = backends[cleaner_id].get_warning(option_id) # TRANSLATORS: %(cleaner) may be Firefox, System, etc. # %(option) may be cache, logs, cookies, etc. # %(warning) may be 'This option is really slow' msg = _("Warning regarding %(cleaner)s - %(option)s:\n\n%(warning)s") % \ {'cleaner': model[parent][0], 'option': model[path][0], 'warning': warning} if warning: resp = GuiBasic.message_dialog(parent_window, msg, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK_CANCEL) if gtk.RESPONSE_OK != resp: # user cancelled, so don't toggle option return model[path][1] = value def col1_toggled_cb(self, cell, path, model, parent_window): """Callback for toggling cleaners""" self.set_cleaner(path, model, parent_window) i = model.get_iter(path) # if toggled on, enable the parent parent = model.iter_parent(i) if None != parent and model[path][1]: model[parent][1] = True # if all siblings toggled off, disable the parent if parent and not model[path][1]: sibling = model.iter_nth_child(parent, 0) any_true = False while sibling: if model[sibling][1]: any_true = True sibling = model.iter_next(sibling) if not any_true: model[parent][1
ChinaQuants/bokeh
bokeh/_legacy_charts/_builder.py
Python
bsd-3-clause
7,596
0.00237
"""This is the Bokeh charts interface. It gives you a high level API to build complex plot is a simple way. This is the Builder class, a minimal prototype class to build more chart types on top of it. """ #----------------------------------------------------------------------------- # Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved. # # Powered by the Bokeh Development Team. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- #------------------------------------------------------------------------
----- # Imports #----------------------------------------------------------------------------- from __future__ import absolute_import from ._chart import LegacyChart from ._data_adapter import DataAdapter from ..models.ranges import Range from ..
properties import Color, HasProps, Instance, Seq DEFAULT_PALETTE = ["#f22c40", "#5ab738", "#407ee7", "#df5320", "#00ad9c", "#c33ff3"] #----------------------------------------------------------------------------- # Classes and functions #----------------------------------------------------------------------------- def create_and_build(builder_class, values, **kws): builder_props = set(builder_class.properties()) # create the new builder builder_kws = { k:v for k,v in kws.items() if k in builder_props} builder = builder_class(values, **builder_kws) # create a chart to return, since there isn't one already chart_kws = { k:v for k,v in kws.items() if k not in builder_props} chart = LegacyChart(**chart_kws) chart.add_builder(builder) return chart class Builder(HasProps): """ A prototype class to inherit each new chart Builder type. It provides useful methods to be used by the inherited builder classes, in order to automate most of the charts creation tasks and leave the core customization to specialized builder classes. In that pattern inherited builders just need to provide: - the following methods: * _yield_renderers: yields the glyphs to be rendered into the plot (and eventually create the self._legends attribute to be used to create the proper legends when builder is called to build the glyphs on a LegacyChart object * _process_data(optional): Get the input data and calculates the 'data' attribute to be used to calculate the source data * _set_sources(optional): Push data into the self.source attribute (of type ColumnDataSource) and build the proper ranges (self.x_range and self.y_range). - the following attributes: x_range: y_range: _legends: so Builder can use it all to _yield_renderers on a chart when called with the create method. """ x_range = Instance(Range) y_range = Instance(Range) palette = Seq(Color, default=DEFAULT_PALETTE) def __init__(self, values=None, **kws): """Common arguments to be used by all the inherited classes. Args: values (iterable): iterable 2d representing the data series values matrix. legend (str, bool): the legend of your plot. The legend content is inferred from incoming input.It can be ``top_left``, ``top_right``, ``bottom_left``, ``bottom_right``. It is ``top_right`` is you set it as True. palette(list, optional): a list containing the colormap as hex values. Attributes: source (obj): datasource object for your plot, initialized as a dummy None. x_range (obj): x-associated datarange object for you plot, initialized as a dummy None. y_range (obj): y-associated datarange object for you plot, initialized as a dummy None. groups (list): to be filled with the incoming groups of data. Useful for legend construction. data (dict): to be filled with the incoming data and be passed to the ColumnDataSource in each chart inherited class. Needed for _set_And_get method. attr (list): to be filled with the new attributes created after loading the data dict. Needed for _set_And_get method. """ super(Builder, self).__init__(**kws) if values is None: values = [] self._values = values # TODO: No real reason why legends should be *private*, should be # legends self._legends = [] self._data = {} self._groups = [] self._attr = [] def _adapt_values(self): """Prepare the input data. 
Converts data input (self._values) to a DataAdapter and creates instance index if needed """ if hasattr(self, 'index'): self._values_index, self._values = DataAdapter.get_index_and_data( self._values, self.index ) else: if not isinstance(self._values, DataAdapter): self._values = DataAdapter(self._values, force_alias=False) def _process_data(self): """Get the input data. It has to be implemented by any of the inherited classes representing each different chart type. It is the place where we make specific calculations for each chart. """ pass def _set_sources(self): """Push data into the ColumnDataSource and build the proper ranges. It has to be implemented by any of the inherited classes representing each different chart type. """ pass def _yield_renderers(self): """ Generator that yields the glyphs to be drawn on the plot It has to be implemented by any of the inherited classes representing each different chart type. """ pass def create(self, chart=None): self._adapt_values() self._process_data() self._set_sources() renderers = self._yield_renderers() chart.add_renderers(self, renderers) # create chart ranges.. if not chart.x_range: chart.x_range = self.x_range if not chart.y_range: chart.y_range = self.y_range # always contribute legends, let LegacyChart sort it out legends = self._legends chart.add_legend(legends) return chart #*************************** # Some helper methods #*************************** def _set_and_get(self, data, prefix, attr, val, content): """Set a new attr and then get it to fill the self._data dict. Keep track of the attributes created. Args: data (dict): where to store the new attribute content prefix (str): prefix of the new attribute attr (list): where to store the new attribute names val (string): name of the new attribute content (obj): content of the new attribute """ data["%s%s" % (prefix, val)] = content attr.append("%s%s" % (prefix, val)) def set_and_get(self, prefix, val, content): """Set a new attr and then get it to fill the self._data dict. Keep track of the attributes created. Args: prefix (str): prefix of the new attribute val (string): name of the new attribute content (obj): content of the new attribute """ self._set_and_get(self._data, prefix, self._attr, val, content)
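The Builder docstring above describes a template-method pattern: create() runs _adapt_values, _process_data, _set_sources and _yield_renderers in a fixed order, and subclasses only fill in the hooks. A minimal sketch of a conforming subclass, assuming ColumnDataSource and Range1d are importable from bokeh's models and using an invented Line glyph helper (illustrative names, not the real bokeh chart builders):

class LineBuilder(Builder):
    def _process_data(self):
        # derive plottable columns from the raw input values
        self._data = {"x": list(range(len(self._values))),
                      "y": list(self._values)}

    def _set_sources(self):
        # push the computed columns into a source and build both ranges
        self.source = ColumnDataSource(self._data)
        self.x_range = Range1d(0, len(self._data["x"]))
        self.y_range = Range1d(min(self._data["y"]), max(self._data["y"]))

    def _yield_renderers(self):
        # one glyph per series; create() hands these to the chart
        yield Line(x="x", y="y")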
jokuf/hack-blog
posts/migrations/0008_auto_20170327_1923.py
Python
mit
469
0
# -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-03-27 19:23 from __future__ import unicode_literals from django.db import migra
tions, models class Migration(migrations.Migration): dependencies = [ ('posts', '0007_auto_20170327_1919'), ] operations = [ migrations.AlterField( model_name='post', name='image', field=models.ImageField(blank=True, null=True, upload_to=''),
), ]
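For context, the AlterField above is what Django generates when Post.image is loosened to allow blank/null values. A sketch of the model state it migrates to (field name and options taken straight from the operation; the rest of the model is assumed):

from django.db import models

class Post(models.Model):
    # target state of migration 0008: optional image with no upload prefix
    image = models.ImageField(blank=True, null=True, upload_to='')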
archman/phantasy
phantasy/library/lattice/flame.py
Python
bsd-3-clause
43,973
0.003434
# encoding: UTF-8 """ Utility for generating a FLAME lattice from accelerator layout. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import re import logging import os.path from collections import OrderedDict import numpy from flame import GLPSPrinter from phantasy.library.parser import Configuration from phantasy.library.layout import DriftElement from phantasy.library.layout import ValveElement from phantasy.library.layout import CavityElement from phantasy.library.layout import BLMElement from phantasy.library.layout import BLElement from phantasy.library.layout import BCMElement from phantasy.library.layout import BPMElement from phantasy.library.layout import PMElement from phantasy.library.layout import SolCorElement from phantasy.library.layout import PortElement from phantasy.library.layout import CorElement from phantasy.library.layout import BendElement from phantasy.library.layout import QuadElement from phantasy.library.layout import StripElement from phantasy.library.layout import SextElement from phantasy.library.layout import EBendElement from phantasy.library.layout import EQuadElement from phantasy.library.layout import FCElement from phantasy.library.layout import VDElement from phantasy.library.layout import EMSElement from phantasy.library.layout import ElectrodeElement from phantasy.library.layout import SolElement from phantasy.library.settings import Settings try: basestring except NameError: basestring = str CONFIG_FLAME_SIM_TYPE = "flame_sim_type" CONFIG_FLAME_CAV_TYPE = "flame_cav_type" CONFIG_FLAME_CAV_CONF = "flame_cav_conf" CONFIG_FLAME_DATA_DIR = "flame_data_dir" CONFIG_FLAME_MPOLE_LEVEL = "flame_mpole_level" CONFIG_FLAME_HDIPOLE_FIT_MODE = "flame_hdipole_fit_mode" CONFIG_FLAME_PARTICLE_MASS = "flame_particle_mass" CONFIG_FLAME_INITIAL_ENERGY = "flame_initial_energy" CONFIG_FLAME_CHARGE = "flame_charge" CONFIG_FLAME_COUNT = "flame_count" CONFIG_FLAME_STRIPPER_CHARGE = "flame_stripper_charge" CONFIG_FLAME_STRIPPER_COUNT = "flame_stripper_count" CONFIG_FLAME_INITIAL_POSITION_FILE = "flame_initial_position_file" CONFIG_FLAME_INITIAL_ENVELOPE_FILE = "flame_initial_envelope_file" CONFIG_FLAME_SPLIT = "flame_split" CONFIG_FLAME_EQUAD_RADIUS = "flame_radius" CONFIG_FLAME_EBEND_PHI = "flame_phi" CONFIG_FLAME_EBEND_FRINGEX = 'flame_fringe_x' CONFIG_FLAME_EBEND_FRINGEY = 'flame_fringe_y' CONFIG_FLAME_EBEND_VERBOOL = 'flame_ver' CONFIG_FLAME_EBEND_SPHERBOOL = 'flame_spher' CONFIG_FLAME_EBEND_ASYMFAC = 'flame_asym_fac' CONFIG_FLAME_BEND_FOCUSING = 'focusing_component' # drift mask: bool CONFIG_DRIFT_MASK = "drift_mask" # Sextupole CONFIG_FLAME_SEXT_STEP = 'step' CONFIG_FLAME_SEXT_DSTKICK = 'dstkick' DEFAULT_FLAME_SEXT_STEP = 10 DEFAULT_FLAME_SEXT_DSTKICK = 1 # Position type for PM CONFIG_PM_ANGLE = 'pm_angle' DEFAULT_PM_ANGLE = "-45" # Constants used for IMPACT header parameters SIM_TYPE_MOMENT_MATRIX = "MomentMatrix" MPOLE_LEVEL_FOCUS_DEFOCUS = 0 MPOLE_LEVEL_DIPOLE = 1 MPOLE_LEVEL_QUADRUPOLE = 2 HDIPOLE_FIT_MODE_BEAM_ENERGY = 1 HDIPOLE_FIT_MODE_NONE = 0 # Default values for IMPACT lattice generation _DEFAULT_SIM_TYPE = SIM_TYPE_MOMENT_MATRIX _DEFAULT_MPOLE_LEVEL = MPOLE_LEVEL_QUADRUPOLE _DEFAULT_HDIPOLE_FIT_MODE = HDIPOLE_FIT_MODE_BEAM_ENERGY _DEFAULT_CHARGE = 0.138655462 _DEFAULT_COUNT = 1.0 _DEFAULT_PARTICLE_MASS = 931.49432e6 _DEFAULT_INITIAL_ENERGY = 0.0 _DEFAULT_INITIAL_POSITION = [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ] _DEFAULT_INITIAL_ENVELOPE = [ 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] _DEFAULT_DATA_DIR = "data" _DEFAULT_SPLIT = 1 _LOGGER = logging.getLogger(__name__) def build_lattice(accel, **kwargs): """Build ``FlameLattice`` object. Parameters ---------- accel : Layout object. Keyword Arguments ----------------- config : Configuration options. settings : Accelerator settings. start : Start element. end : End element. template : Template. Returns ------- ret : Flame lattice object. """ lattice_factory = FlameLatticeFactory(accel, **kwargs) return lattice_factory.build() class BaseLatticeFactory(object): """ """ def __init__(self): pass def _get_config_default(self, option, defvalue): if self.config.has_default(option): value = self.config.get_default(option) _LOGGER.debug("BaseLatticeFactory: '{}' found in configuration: {}".format(option, value)) return value return defvalue def _get_config_int_default(self, option, defvalue): if self.config.has_default(option): value = self.config.getint_default(option) _LOGGER.debug("BaseLatticeFactory: '{}' foun
d in configuration: {}".format(option, value)) return value return defvalue def _get_config_float_default(self, option, defvalue): if self.config.has_default(option): value = self.config.getfloat_default(option) _LOGGER.debug("BaseLatticeFactory: '{}' found in configuration: {}".format(option, value)) return value ret
urn defvalue def _get_config_array_default(self, option, defvalue, conv=None, unpack=True): if self.config.has_default(option): value = self.config.getarray_default(option, conv=conv) if unpack and (len(value) == 1): value = value[0] _LOGGER.debug("BaseLatticeFactory: '{}' found in configuration: {}".format(option, value)) return value return defvalue def _get_config_abspath_default(self, option, defvalue): if self.config.has_default(option): value = self.config.getabspath_default(option) _LOGGER.debug("BaseLatticeFactory: '{}' found in configuration: {}".format(option, value)) return value return defvalue def _get_config(self, section, option, defvalue): if self.config.has_option(section, option): value = self.config.get(section, option) _LOGGER.debug("BaseLatticeFactory: [{}] '{}' found in configuration: {}".format(section, option, value)) return value return defvalue def _get_config_array(self, section, option, defvalue, conv=None, unpack=True): if self.config.has_option(section, option): value = self.config.getarray(section, option, conv=conv) if unpack and (len(value) == 1): value = value[0] _LOGGER.debug("BaseLatticeFactory: [{}] '{}' found in configuration: {}".format(section, option, value)) return value return defvalue def build(self): raise NotImplementedError() class FlameLatticeFactory(BaseLatticeFactory): """FlameLatticeFactory class builds a FLAME Lattice object from an Accelerator Design Description. Parameters ---------- accel : Accelerator layout. Keyword Arguments ----------------- config : Configuration options. settings : Accelerator settings. start : Start element. end : End element. template : Template. """ def __init__(self, accel, **kwargs): super(FlameLatticeFactory, self).__init__() self._accel = accel if kwargs.get("config", None) is not None: self.config = kwargs.get("config") else: self.config = Configuration() if kwargs.get("settings", None) is not None: self.settings = kwargs.get("settings") else: self.settings = self._get_config_settings() self.simType = kwargs.get("simType", None) self.dataDir = kwargs.get("dataDir", None) self.count = kwargs.get("count", None) self.charge = kwargs.get("charge", None)
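Every _get_config_* helper above follows one shape: probe the configuration, log and return the (typed) value on a hit, otherwise fall back to the caller's default. The same idiom sketched over a plain dict, for illustration only (the real Configuration object exposes typed getters such as getint_default and getarray_default):

import logging

_LOG = logging.getLogger(__name__)

def get_with_default(config, option, defvalue, conv=None):
    # lookup -> optional conversion -> debug log -> fallback
    if option in config:
        value = config[option]
        if conv is not None:
            value = conv(value)
        _LOG.debug("'%s' found in configuration: %s", option, value)
        return value
    return defvalue

# get_with_default({"flame_split": "2"}, "flame_split", 1, conv=int)  -> 2
# get_with_default({}, "flame_split", 1, conv=int)                    -> 1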
madisona/django-pseudo-cms
example/settings.py
Python
bsd-3-clause
3,794
0.000264
# Django settings for example project. import sys from os.path import
abspath, dirname, join PROJECT_DIR = abspath(dirname(__file__)) grandparent = abspath(join(PROJECT_DIR, '..')) for path in (
grandparent, PROJECT_DIR): if path not in sys.path: sys.path.insert(0, path) DEBUG = True ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': join(PROJECT_DIR, 'local.db'), } } TIME_ZONE = 'America/Chicago' LANGUAGE_CODE = 'en-us' SITE_ID = 1 USE_I18N = True USE_L10N = True USE_TZ = True MEDIA_ROOT = join(PROJECT_DIR, 'media') MEDIA_URL = '/media/' STATIC_ROOT = '' STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = '1z3q9ef2q0vm^*=p=ksuag6++(o!eg^w=d!&amp;mtoa_e9ib-kf5$' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ ], 'OPTIONS': { 'context_processors': [ # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this # list if you haven't customized them: 'django.contrib.auth.context_processors.auth', 'django.template.context_processors.request', 'django.contrib.messages.context_processors.messages', ], 'loaders': ('django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader',) }, }, ] MIDDLEWARE = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'example.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'example.wsgi.application' INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.admin', 'pseudo_cms', ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } }
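The path bootstrap at the top of this settings module is a small but load-bearing idiom: the project directory and its parent are pushed onto sys.path exactly once, so 'example.*' modules and sibling apps both resolve regardless of the working directory. The idiom in isolation:

import sys
from os.path import abspath, dirname, join

HERE = abspath(dirname(__file__))
for path in (abspath(join(HERE, '..')), HERE):
    if path not in sys.path:      # guard against duplicates on re-import
        sys.path.insert(0, path)  # the last insert ends up first on sys.path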
ajparsons/useful_inkleby
useful_inkleby/useful_django/models/__init__.py
Python
mit
162
0.024691
from .mixins import * from .flexi import * class FlexiBulkModel(
FlexiModel,EasyBu
lkModel,StockModelHelpers): class Meta: abstract = True
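Because FlexiBulkModel declares Meta.abstract = True, it contributes behaviour without creating a table; concrete models opt in by subclassing it. A hedged usage sketch (the Article model and its field are invented for illustration):

from django.db import models
from useful_inkleby.useful_django.models import FlexiBulkModel

class Article(FlexiBulkModel):
    # inherits the FlexiModel/EasyBulkModel/StockModelHelpers behaviour
    # and gets its own concrete table
    title = models.CharField(max_length=255)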
meletakis/collato
esn/actstream/urls.py
Python
gpl-2.0
2,399
0.005002
try: from django.conf.urls import url, patterns except ImportError: from django.conf.urls.defaults import url, patterns from actstream import feeds from actstream import views from django.contrib.auth.decorators import login_required urlpatterns = patterns('actstream.views', # Syndication Feeds url(r'^feed/(?P<content_type_id>\d+)/(?P<object_id>\d+)/
atom/$'
, feeds.AtomObjectActivityFeed(), name='actstream_object_feed_atom'), url(r'^feed/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$', feeds.ObjectActivityFeed(), name='actstream_object_feed'), url(r'^feed/(?P<content_type_id>\d+)/atom/$', feeds.AtomModelActivityFeed(), name='actstream_model_feed_atom'), url(r'^feed/(?P<content_type_id>\d+)/(?P<object_id>\d+)/as/$', feeds.ActivityStreamsObjectActivityFeed(), name='actstream_object_feed_as'), url(r'^feed/(?P<content_type_id>\d+)/$', feeds.ModelActivityFeed(), name='actstream_model_feed'), url(r'^feed/$', feeds.UserActivityFeed(), name='actstream_feed'), url(r'^feed/atom/$', feeds.AtomUserActivityFeed(), name='actstream_feed_atom'), # Follow/Unfollow API url(r'^follow/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$', 'follow_unfollow', name='actstream_follow'), url(r'^follow_all/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$', 'follow_unfollow', {'actor_only': False}, name='actstream_follow_all'), url(r'^unfollow/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$', 'follow_unfollow', {'do_follow': False}, name='actstream_unfollow'), # Follower and Actor lists url(r'^followers/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$', 'followers', name='actstream_followers'), url(r'^actors/(?P<content_type_id>\d+)/(?P<object_id>\d+)/$', 'actor', name='actstream_actor'), url(r'^actors/(?P<content_type_id>\d+)/$', 'model', name='actstream_model'), url(r'^new_wall_post/$', view=login_required (views.new_wall_post), name='new_wall_post'), url(r'^detail/(?P<action_id>\d+)/$', view=login_required(views.detail), name='actstream_detail'), url(r'^(?P<username>[-\w]+)/$', view=login_required (views.user), name='actstream_user'), url(r'^$', view=login_required (views.stream), name='actstream'), url(r'^new_group_post', view=login_required (views.new_group_post), name='new_group_post'), )
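Since every pattern above is named, callers should reverse them rather than hard-code paths. A sketch, assuming a ContentType id of 7, an object id of 42, and the URLconf included at the site root:

from django.core.urlresolvers import reverse  # django.urls.reverse on newer Django

# -> /feed/7/42/  (the object activity feed declared above)
feed_url = reverse('actstream_object_feed',
                   kwargs={'content_type_id': 7, 'object_id': 42})
# -> /follow/7/42/
follow_url = reverse('actstream_follow',
                     kwargs={'content_type_id': 7, 'object_id': 42})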
Shuailong/Leetcode
solutions/first-unique-character-in-a-string.py
Python
mit
660
0
#!/usr/bin/env python # encoding: utf-8 """ first-unique-character-in-a-string.py Created by Shuailong on 2016-09-01. https://leetcode.com/problems/first-unique-character-in-a-string/. """ # 376 ms from collections
import Counter class Solution(object): def firstUniqChar(self, s): """ :typ
e s: str :rtype: int """ c = Counter(s) for i in range(len(s)): if c[s[i]] == 1: return i return -1 def main(): solution = Solution() print solution.firstUniqChar('leetcode') print solution.firstUniqChar('loveleetcode') if __name__ == '__main__': main()
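The solution above does one Counter pass plus one index scan, both O(n). An equivalent sketch that avoids collections by scanning only the distinct characters (often fast in CPython because str.count and str.index run in C; illustrative, not the author's submission):

def first_uniq_char(s):
    best = len(s)
    for ch in set(s):             # at most 26 distinct letters here
        if s.count(ch) == 1:
            best = min(best, s.index(ch))
    return -1 if best == len(s) else best

assert first_uniq_char('leetcode') == 0
assert first_uniq_char('loveleetcode') == 2
assert first_uniq_char('aabb') == -1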
jmcanterafonseca/fiware-orion
test/acceptance/behave/components/common_steps/general_steps.py
Python
agpl-3.0
18,232
0.003189
# -*- coding: utf-8 -*- """ Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U This file is part of Orion Context Broker. Orion Context Broker is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Orion Context Broker is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with Orion Context Broker. If not, see http://www.gnu.org/licenses/. For those usages not covered by this license please contact with iot_support at tid dot es """ __author__ = 'Iván Arias León (ivan dot ariasleon at telefonica dot com)' import behave from behave import step from iotqatools.helpers_utils import * from iotqatools.cb_v2_utils import CB from iotqatools.mongo_utils import Mongo from iotqatools.remote_log_utils import Remote_Log from iotqatools.fabric_utils import FabricSupport from tools.properties_config import Properties # methods in properties class from tools.NGSI_v2 import NGSI # constants properties_class = Properties() CONTEXT_BROKER_ENV = u'context_broker_env' MONGO_ENV = u'mongo_env' # HTTP status code status_codes = {'OK': 200, 'Created': 201, 'No Content': 204, 'Moved Permanently': 301, 'Redirect': 307, 'Bad Request': 400, 'unauthorized': 401, 'Not Found': 404, 'Method Not Allowed': 405, 'Not Acceptable': 406, 'Conflict': 409, 'Content Length Required': 411, 'Request Entity Too Large': 413, 'Unsupported Media Type': 415, 'Unprocessable Entity': 422, 'Internal Server Error': 500} behave.use_step_matcher("re") __logger__ = logging.getLogger("steps") # --------------- general_operations ---------------------- @step(u'send a API entry point request') def send_a_base_request(context): """ send a API entry point request :param context: It’s a clever place where you and behave can store information to share around. It runs at three levels, automatically managed by behave. """ __logger__.debug("Sending a
API entry point request: /v2 ...") props = propertie
s_class.read_properties()[CONTEXT_BROKER_ENV] context.cb = CB(protocol=props["CB_PROTOCOL"], host=props["CB_HOST"], port=props["CB_PORT"]) context.resp = context.cb.get_base_request() __logger__.info("...Sent a API entry point request: /v2 correctly") @step(u'send a version request') def send_a_version_request(context): """ send a version request :param context: It’s a clever place where you and behave can store information to share around. It runs at three levels, automatically managed by behave. """ __logger__.debug("Sending a version request...") context.props_cb_env = properties_class.read_properties()[CONTEXT_BROKER_ENV] context.cb = CB(protocol=context.props_cb_env["CB_PROTOCOL"], host=context.props_cb_env["CB_HOST"], port=context.props_cb_env["CB_PORT"]) context.resp = context.cb.get_version_request() __logger__.info("..Sent a version request correctly") send_a_version_request = step(u'send a version request')(send_a_version_request) @step(u'send a statistics request') def send_a_statistics_request(context): """ send a statistics request :param context: It’s a clever place where you and behave can store information to share around. It runs at three levels, automatically managed by behave. """ __logger__.debug("Sending a statistics request...") context.props_cb_env = properties_class.read_properties()[CONTEXT_BROKER_ENV] context.cb = CB(protocol=context.props_cb_env["CB_PROTOCOL"], host=context.props_cb_env["CB_HOST"], port=context.props_cb_env["CB_PORT"]) context.resp = context.cb.get_statistics_request() __logger__.info("..Sent a statistics request correctly") @step(u'send a cache statistics request') def send_a_cache_statistics_request(context): """ send a cache statistics request :param context: It’s a clever place where you and behave can store information to share around. It runs at three levels, automatically managed by behave. """ __logger__.debug("Sending a statistics request...") context.props_cb_env = properties_class.read_properties()[CONTEXT_BROKER_ENV] context.cb = CB(protocol=context.props_cb_env["CB_PROTOCOL"], host=context.props_cb_env["CB_HOST"], port=context.props_cb_env["CB_PORT"]) context.resp = context.cb.get_cache_statistics_request() __logger__.info("..Sent a statistics request correctly") @step(u'delete database in mongo') def delete_database_in_mongo(context): """ Delete database used in mongo :param context: It’s a clever place where you and behave can store information to share around. It runs at three levels, automatically managed by behave. """ fiware_service_header = u'Fiware-Service' orion_prefix = u'orion' database_name = orion_prefix props_mongo = properties_class.read_properties()[MONGO_ENV] # mongo properties dict mongo = Mongo(host=props_mongo["MONGO_HOST"], port=props_mongo["MONGO_PORT"], user=props_mongo["MONGO_USER"], password=props_mongo["MONGO_PASS"]) headers = context.cb.get_headers() if fiware_service_header in headers: if headers[fiware_service_header] != EMPTY: if headers[fiware_service_header].find(".") < 0: database_name = "%s-%s" % (database_name, headers[fiware_service_header].lower()) else: postfix = headers[fiware_service_header].lower()[0:headers[fiware_service_header].find(".")] database_name = "%s-%s" % (database_name, postfix) __logger__.debug("Deleting database \"%s\" in mongo..." 
% database_name) mongo.connect(database_name) mongo.drop_database() mongo.disconnect() __logger__.info("...Database \"%s\" is deleted" % database_name) @step(u'check in log, label "([^"]*)" and message "([^"]*)"') def check_in_log_label_and_text(context, label, text): """ Verify in log file if a label with a message exists :param context: It’s a clever place where you and behave can store information to share around. It runs at three levels, automatically managed by behave. :param label: label to find :param text: text to find (begin since the end) """ __logger__.debug("Looking for in log the \"%s\" label and the \"%s\" text..." % (label, text)) props_cb_env = properties_class.read_properties()[CONTEXT_BROKER_ENV] remote_log = Remote_Log(file="%s/contextBroker.log" % props_cb_env["CB_LOG_FILE"], fabric=context.my_fab) line = remote_log.find_line(label, text) assert line is not None, " ERROR - the \"%s\" label and the \"%s\" text do not exist in the log" % (label, text) __logger__.info("log line: \n%s" % line) ngsi = NGSI() ngsi.verify_log(context, line) __logger__.info("...confirmed traces in log") @step(u'delay for "([^"]*)" seconds') def delay_for_seconds(context, seconds): """ delay for N seconds :param seconds: seconds to delay :param context: It’s a clever place where you and behave can store information to share around. It runs at three levels, automatically managed by behave. """ __logger__.info("delay for \"%s\" seconds" % seconds) time.sleep(int(seconds)) @step(u'retrieve the log level') def retrieve_the_log_level(context): """ retrieve the log level in Context Broker :param context: It’s a clever place where you and behave can store information to share around. It runs at three levels, automatically managed by behave. """ __logger__.info("retrieving the log level in Context Broker") context.props_cb_env = properties_cl
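The status_codes table above exists so feature files can speak in HTTP reason phrases rather than numbers. A hedged sketch of a verification step that would consume it (the step text and the response attribute are assumptions; the project's real verification steps may differ):

@step(u'verify that receive an "([^"]*)" http code')
def verify_http_code(context, reason):
    # translate the phrase from the scenario into its numeric code
    expected = status_codes[reason]
    assert context.resp.status_code == expected, \
        " ERROR - expected %s (%s) but got %s" % (expected, reason, context.resp.status_code)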
cscanlin/Super-Simple-VLOOKUP-in-Python
python_vlookup/__init__.py
Python
mit
29
0
from python_vlookup import *
alisaifee/holmium.core
tests/support/cucumber/steps.py
Python
mit
823
0
from holmium.core import ( Page, Element, Locators, Elements, ElementMap, Section, Sections ) from holmium.core.cucumber import init_steps init_steps() class TestSection(Section): el = Element(Locators.NAME, "el") els = Elements(Locators.NAME, "els") elmap = ElementMap(Locators.NAME, "elmap") class TestSections(Sections): el = Element(Locators.NAME, "el") class TestPage(Page): el = Element(Locators.NAME, "el") els = Elements(Locators.NAME, "els"
) elmap = ElementMap(Locators.NAME, "elmap") sections = TestSections(Locators.NAME, "sections") section = TestSection(Locators.NAME, "section") def do_stuff(self, a, b): return a + b def do_stuff_no_args(self): return True def do_stuff_var_args(self, *a
rgs, **kwargs): return args, kwargs
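Page objects such as TestPage bind to a live webdriver at construction and resolve their declared elements lazily on attribute access. A hedged usage sketch (driver choice and URL are placeholders):

from selenium import webdriver

driver = webdriver.Firefox()
page = TestPage(driver, url="http://localhost:8000")
print(page.el.text)         # Element resolves to a single WebElement proxy
print(len(page.els))        # Elements behaves like a list
print(page.do_stuff(1, 2))  # plain methods coexist with declared elements
driver.quit()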
danirus/blognajd
blognajd/tests/test_sitemaps.py
Python
mit
1,817
0
from django.core.urlresolvers import reverse from django.test import TestCase as DjangoTestCase from blognajd.models import Story, SiteSettings from blognajd.sitemaps import StaticSite
map, StoriesSitemap class StaticSitemap1TestCase(DjangoTestCase): fixtures = ['sitesettings_tests.json'] def test_staticsitemap_items_disabled(self): sitesettings = SiteSettings.objects.get(pk=1) sitesettings.has_about_page = False sitesettings.has_projects_page = False sitesettings.has_contact_page = False sitesettings.save() self.assertEqual([i
for i in StaticSitemap().items()], ['blog']) def test_staticsitemap_items_enabled(self): sitesettings = SiteSettings.objects.get(pk=1) sitesettings.has_about_page = True sitesettings.has_projects_page = True sitesettings.has_contact_page = True sitesettings.save() self.assertEqual(sorted([i for i in StaticSitemap().items()]), ['about', 'blog', 'contact', 'projects']) def test_staticsitemap_location(self): sitemap = StaticSitemap() for item in sitemap.items(): if item == 'contact': urlname = 'contactme-get-contact-form' else: urlname = item self.assertEqual(sitemap.location(item), reverse(urlname)) class StoriesSitemapTestCase(DjangoTestCase): fixtures = ['story_tests.json'] def setUp(self): self.story = Story.objects.get(pk=1) self.sitemap = StoriesSitemap() def test_storiessitemap_items(self): self.assertEqual(len(self.sitemap.items()), 1) def test_storiessitemap_lastmod(self): for item in self.sitemap.items(): self.assertEqual(self.sitemap.lastmod(item), self.story.mod_date)
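The two tests above pin down StaticSitemap's contract: items() always yields 'blog' and adds 'about'/'projects'/'contact' only when the corresponding SiteSettings flags are on. A plausible sketch of that contract (illustrative; the real implementation lives in blognajd.sitemaps):

def static_items(settings):
    # mirrors the behaviour asserted by the enabled/disabled tests above
    items = ['blog']
    if settings.has_about_page:
        items.append('about')
    if settings.has_projects_page:
        items.append('projects')
    if settings.has_contact_page:
        items.append('contact')
    return items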
hgiemza/DIRAC
WorkloadManagementSystem/JobWrapper/Watchdog.py
Python
gpl-3.0
39,322
0.028915
######################################################################## # File : Watchdog.py # Author: Stuart Paterson ######################################################################## """ The Watchdog class is used by the Job Wrapper to resolve and monitor the system resource consumption. The Watchdog can determine if a running job is stalled and indicate this to the Job Wrapper. Furthermore, the Watchdog will identify when the Job CPU limit has been exceeded and fail jobs meaningfully. Information is returned to the WMS via the heart-beat mechanism. This also interprets control signals from the WMS e.g. to kill a running job. - Still to implement: - CPU normalization for correct comparison with job limit """ __RCSID__ = "$Id$" import os import re import time from DIRAC import S_OK, S_ERROR, gLogger from DIRAC.Core.Utilities import Time from DIRAC.Core.Utilities import MJF from DIRAC.Core.DISET.RPCClient import RPCClient from DIRAC.ConfigurationSystem.Client.Config import gConfig from DIRAC.ConfigurationSystem.Client.PathFinder import getSystemInstance from DIRAC.Core.Utilities.ProcessMonitor import ProcessMonitor from DIRAC.Core.Utilities.TimeLeft.TimeLeft import TimeLeft from DIRAC.Core.Utilities.Subprocess import getChildrenPIDs class Watchdog( object ): ############################################################################# def __init__( self, pid, exeThread, spObject, jobCPUTime, memoryLimit = 0, processors = 1, systemFlag = 'linux', jobArgs = {} ): """ Constructor, takes system flag as argument. """ self.stopSigStartSeconds = int( jobArgs.get( 'StopSigStartSeconds', 1800 ) ) # 30 minutes self.stopSigFinishSeconds = int( jobArgs.get( 'StopSigFinishSeconds', 1800 ) ) # 30 minutes self.stopSigNumber = int( jobArgs.get( 'StopSigNumber', 2 ) ) # SIGINT self.stopSigRegex = jobArgs.get( 'StopSigRegex', None ) self.stopSigSent = False self.log = gLogger.getSubLogger( "Watchdog" ) self.systemFlag = systemFlag self.exeThread = exeThread self.wrapperPID = pid self.appPID = self.exeThread.getCurrentPID() self.spObject = spObject self.jobCPUTime = jobCPUTime self.memoryLimit = memoryLimit self.calibration = 0 self.initialValues = {} self.parameters = {} self.peekFailCount = 0 self.peekRetry = 5 self.processMonitor = ProcessMonitor() self.checkError = '' self.currentStats = {} self.initialized = False self.count = 0 #defaults self.testWallClock = 1 self.testDiskSpace = 1 self.testLoadAvg = 1 self.maxWallClockTime = 3 * 24 * 60 * 60 self.testCPUConsumed = 1 self.testCPULimit = 0 self.testMemoryLimit = 0 self.testTimeLeft = 1 self.pollingTime = 10 # 10 seconds self.checkingTime = 30 * 60 # 30 minute period self.minCheckingTime = 20 * 60 # 20 mins self.wallClockCheckSeconds = 5 * 60 # 5 minutes self.maxWallClockTime = 3 * 24 * 60 * 60 # e.g. 4 days self.jobPeekFlag = 1 # on / off self.minDiskSpace = 10 # MB self.loadAvgLimit = 1000 # > 1000 and jobs killed self.sampleCPUTime = 30 * 60 # e.g. up to 20mins sample self.jobCPUMargin = 20 # %age buffer before killing job self.minCPUWallClockRatio = 5 # ratio %age self.nullCPULimit = 5 # After 5 sample times return null CPU consumption kill job self.checkCount = 0 self.wallClockCheckCount = 0 self.nullCPUCount = 0 self.grossTimeLeftLimit = 10 * self.checkingTime self.timeLeftUtil = TimeLeft() self.timeLeft = 0 self.littleTimeLeft = False self.scaleFactor = 1.0 self.processors = processors ############################################################################# def initialize( self, loops = 0 ): """ Watchdog initialization. 
""" if self.initialized: self.log.info( 'Watchdog already initialized' ) return S_OK() else: self.initialized = True setup = gConfig.getValue( '/DIRAC/Setup', '' ) if not setup: return S_ERROR( 'Can not get the DIRAC Setup value' ) wms_instance = getSystemInstance( "WorkloadManagement" ) if not wms_instance: return S_ERROR( 'Can not get the WorkloadManagement system instance' ) self.section = '/Systems/WorkloadManagement/%s/JobWrapper' % wms_instance self.maxcount = loops self.log.verbose( 'Watchdog initialization' ) self.log.info( 'Attem
pting to Initialize Watchdog for: %s' % ( self.systemFlag ) ) # Test control flags self.testWa
llClock = gConfig.getValue( self.section + '/CheckWallClockFlag', 1 ) self.testDiskSpace = gConfig.getValue( self.section + '/CheckDiskSpaceFlag', 1 ) self.testLoadAvg = gConfig.getValue( self.section + '/CheckLoadAvgFlag', 1 ) self.testCPUConsumed = gConfig.getValue( self.section + '/CheckCPUConsumedFlag', 1 ) self.testCPULimit = gConfig.getValue( self.section + '/CheckCPULimitFlag', 0 ) self.testMemoryLimit = gConfig.getValue( self.section + '/CheckMemoryLimitFlag', 0 ) self.testTimeLeft = gConfig.getValue( self.section + '/CheckTimeLeftFlag', 1 ) # Other parameters self.pollingTime = gConfig.getValue( self.section + '/PollingTime', 10 ) # 10 seconds self.checkingTime = gConfig.getValue( self.section + '/CheckingTime', 30 * 60 ) # 30 minute period self.minCheckingTime = gConfig.getValue( self.section + '/MinCheckingTime', 20 * 60 ) # 20 mins self.maxWallClockTime = gConfig.getValue( self.section + '/MaxWallClockTime', 3 * 24 * 60 * 60 ) # e.g. 4 days self.jobPeekFlag = gConfig.getValue( self.section + '/JobPeekFlag', 1 ) # on / off self.minDiskSpace = gConfig.getValue( self.section + '/MinDiskSpace', 10 ) # MB self.loadAvgLimit = gConfig.getValue( self.section + '/LoadAverageLimit', 1000 ) # > 1000 and jobs killed self.sampleCPUTime = gConfig.getValue( self.section + '/CPUSampleTime', 30 * 60 ) # e.g. up to 20mins sample self.jobCPUMargin = gConfig.getValue( self.section + '/JobCPULimitMargin', 20 ) # %age buffer before killing job self.minCPUWallClockRatio = gConfig.getValue( self.section + '/MinCPUWallClockRatio', 5 ) # ratio %age self.nullCPULimit = gConfig.getValue( self.section + '/NullCPUCountLimit', 5 ) # After 5 sample times return null CPU consumption kill job if self.checkingTime < self.minCheckingTime: self.log.info( 'Requested CheckingTime of %s setting to %s seconds (minimum)' % ( self.checkingTime, self.minCheckingTime ) ) self.checkingTime = self.minCheckingTime # The time left is returned in seconds @ 250 SI00 = 1 HS06, # the self.checkingTime and self.pollingTime are in seconds, # thus they need to be multiplied by a large enough factor self.fineTimeLeftLimit = gConfig.getValue( self.section + '/TimeLeftLimit', 150 * self.pollingTime ) self.scaleFactor = gConfig.getValue( '/LocalSite/CPUScalingFactor', 1.0 ) return S_OK() def run( self ): """ The main watchdog execution method """ result = self.initialize() if not result['OK']: self.log.always( 'Can not start watchdog for the following reason' ) self.log.always( result['Message'] ) return result try: while True: self.log.debug( 'Starting watchdog loop # %d' % self.count ) start_cycle_time = time.time() result = self.execute() exec_cycle_time = time.time() - start_cycle_time if not result[ 'OK' ]: self.log.error( "Watchdog error during execution", result[ 'Message' ] ) break elif result['Value'] == "Ended": break self.count += 1 if exec_cycle_time < self.pollingTime: time.sleep( self.pollingTime - exec_cycle_time ) return S_OK() except Exception: self.log.exception() return S_ERROR( 'Exception' ) ##################################################
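The run() loop above implements fixed-cadence polling: time each cycle and sleep only the remainder of pollingTime, so checks fire at a steady rate no matter how long a cycle takes. The pattern in isolation, assuming execute() returns "Ended" when the loop should stop:

import time

def poll(execute, polling_time=10.0):
    # minimal sketch of the Watchdog cadence
    while True:
        start = time.time()
        if execute() == "Ended":
            break
        elapsed = time.time() - start
        if elapsed < polling_time:
            time.sleep(polling_time - elapsed)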
thopiekar/Uranium
tests/Math/TestPolygon.py
Python
lgpl-3.0
9,024
0.014738
# Copyright (c) 2017 Ultimaker B.V. # Uranium is released under the terms of the LGPLv3 or higher. from UM.Math.Polygon import Polygon from UM.Math.Float import Float import numpy import math import pytest pytestmark = pytest.mark.skip(reason = "Incomplete tests") class TestPolygon: def setup_method(self, method): # Called before the first testfunction is executed pass def teardown_method(self, method): # Called after the last testfunction was executed pass ## The individual test cases for mirroring polygons. test_mirror_data = [ ({ "points": [[0.0, 0.0], [2.0, 0.0], [1.0, 2.0]], "axis_point": [0, 0], "axis_direction": [0, 1], "answer": [[-1.0, 2.0], [-2.0, 0.0], [0.0, 0.0]], "label": "Mirror Horizontal", "description": "Test mirroring a polygon horizontally." }), ({ "points": [[0.0, 0.0], [2.0, 0.0], [1.0, 2.0]], "axis_point": [0, 0], "axis_direction": [1, 0], "answer": [[1.0, -2.0], [2.0, 0.0], [0.0, 0.0]], "label": "Mirror Vertical", "description": "Test mirroring a polygon vertically." }), ({ "points": [[0.0, 0.0], [2.0, 0.0], [1.0, 2.0]], "axis_point": [10, 0], "axis_direction": [0, 1], "answer": [[19.0, 2.0], [18.0, 0.0], [20.0, 0.0]], "label": "Mirror Horizontal Far", "description": "Test mirroring a polygon horizontally on an axis that is not through the origin." }), ({ "points": [[0.0, 0.0], [2.0, 0.0], [1.0, 2.0]], "axis_point": [0, 4], "axis_direction": [1, 1], "answer": [[-2.0, 5.0], [-4.0, 6.0], [-4.0, 4.0]], "label": "Mirror Diagonal", "description": "Test mirroring a polygon diagonally." }), ({ "points": [[10.0, 0.0]], "axis_point": [0, 0], "axis_direction": [0, 1], "answer": [[-10.0, 0.0]], "label": "Mirror Single Vertex", "description": "Test mirroring a polygon with only one vertex." }), ({ "points": [], "axis_point": [0, 0], "axis_direction": [1, 0], "answer": [], "label": "Mirror Empty", "description": "Test mirroring an empty polygon." }) ] ## Tests the mirror function. # # \param data The data of the test. Must include a list of points of the # polygon to mirror, a point on the axis, a direction of the axis and an # answer that is the result of the mirroring. @pytest.mark.parametrize("data", test_mirror_data) def test_mirror(self, data): polygon = Polygon(numpy.array(data["points"], numpy.float32)) #Create a polygon with the specified points. polygon.mirror(data["axis_point"], data["axis_direction"]) #Mirror over the specified axis. points = polygon.getPoints() assert len(points) == len(data["points"]) #Must have the same amount of vertices. for point_index in range(len(points)): assert len(points[point_index]) == len(data["answer"][point_index]) #Same dimensionality (2). for dimension in range(len(points[point_index])): assert Float.fuzzyCompare(points[point_index][dimension], data["answer"][point_index][dimension]) #All points must be equal. ## The individual test cases for the projection tests. test_project_data = [ ({ "normal": [0.0, 1.0], "answer": [1.0, 2.0], "label": "Project Vertical", "description": "Project the polygon onto a vertical line." }), ({ "normal": [1.0, 0.0], "answer": [0.0, 1.0], "label": "Project Horizontal", "description": "Project the polygon onto a horizontal line." }), ({ "normal": [math.sqrt(0.5), math.sqrt(0.5)], "answer": [math.sqrt(0.5), math.sqrt(4.5)], "label": "Project Diagonal", "description": "Project the polygon onto a diagonal line." }) ] ## Tests the project function. # # \param data The data of the test. Must include a normal vector to # project on and a pair of coordinates that is the answer. 
@pytest.mark.parametrize("data", test_project_data) def test_project(self, data): p = Polygon(numpy.array([ [0.0, 1.0], [1.0, 1.0], [1.0, 2.0], [0.0, 2.0] ], numpy.float32)) result = p.project(data["normal"]) #Project the polygon onto the specified normal vector. assert len(result) == len(data["answer"]) #Same dimensionality (2). for dimension in range(len(result)): assert Float.fuzzyCompare(result[dimension], data["answer"][dimension]) ## The individual test cases for the intersection tests. test_intersect_data = [ ({ "polygon": [[ 5.0, 0.0], [15.0, 0.0], [15.0, 10.0], [ 5.0, 10.0]], "answer": [-5.0, 0.0], "label": "Intersect Simple", "description": "Intersect with a polygon that fully intersects." }), ({ "polygon": [[-5.0, 0.0], [ 5.0, 0.0], [ 5.0, 10.0], [-5.0, 10.0]], "answer": [ 5.0, 0.0], "label": "Intersect Left", "description": "Intersect with a polygon on the negative x-axis side that fully intersects." }), ({ "polygon": [[ 0.0, 5.0], [10.0, 5.0], [10.0, 15.0], [ 0.0, 15.0]], "answer": [ 0.0, -5.0], "label": "Intersect Straight Above", "description": "Intersect with a polygon that is exactly above the base polygon (edge case)." }), ({ "polygon": [[ 0.0, -5.0], [10.0, -5.0], [10.0, 5.0], [ 0.0, 5.0]], "answer": [ 0.0, 5.0], "label": "Intersect Straight Left", "description": "Intersect with a polygon that is exactly left of the base polygon (edge case)." }), ({ "polygon": [[ 5.0, 5.0], [15.0, -5.0], [30.0, 5.0], [15.0, 15.0]], "answer": [-5.0, 0.0], "label": "Intersect Rotated", "description": "In
tersect with a rotated square." }), ({ "polygon": [[15.0, 0.0], [25.0, 0.0], [25.0, 10.0], [15.0, 10.0]], "answer": None, "label": "Intersect Miss", "description": "Intersect with a polygon that doesn't intersect at all." }) ] ## Tests the polygon intersect function. # # Every test case intersects a par
ametrised polygon with a base square of # 10 by 10 units at the origin. # # \param data The data of the test. Must include a polygon to intersect # with and a required answer. @pytest.mark.parametrize("data", test_intersect_data) def test_intersectsPolygon(self, data): p1 = Polygon(numpy.array([ #The base polygon to intersect with. [ 0, 0], [10, 0], [10, 10], [ 0, 10] ], numpy.float32)) p2 = Polygon(numpy.array(data["polygon"])) #The parametrised polygon to intersect with. #Shift the order of vertices in both polygons around. The outcome should be independent of what the first vertex is. for n in range(0, len(p1.getPoints())): for m in range(0, len(data["polygon"])): result = p1.intersectsPolygon(p2) if not data["answer"]: #Result should be None. assert result == None else: assert result != None for i in range(0, len(data["answer"])): assert Float.fuzzyCompare(result[i], data["answer"][i]) p2.setPoints(numpy.roll(p2.getPoints(), 1, axis = 0)) #Shift p2. p1.setPoints(numpy.roll(p1.getPoints(), 1, axis = 0)) #Shift p1. ## The individual test cases for convex hull intersection tests. test_intersectConvex_data = [ ({ "p1": [[-42, -32], [-42, 12], [62, 12], [62, -32]], "p2": [[-62, -12], [-62, 32], [42, 32], [42, -12]], "answer": [[-42, -12], [-42, 12], [42, 12], [42, -12]], "label": "UM2 Fans", "description": "A simple intersection without edge cases of UM2 fans collision area." }) ] ## Tests the convex hull intersect function. # # \param data The data of the test case. Must include two polygons and a # required result polygon. @pytest.mark.parametrize("data", test_intersectConvex_data) def test_intersectConvexHull(self, data): p1 = Polygon(numpy.array(data["p1"])) p2 = Polygon(numpy.array(data["p2"])) result = p1.intersectionConvexHulls(p2) assert len(result.getPoints()) == len(data["answer"]) #Same amount of vertices. isCorrect = False for rotation in range(0, len(result.getPoints())): #The order of vertic
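All of these cases lean on two mechanics: @pytest.mark.parametrize fans a list of dicts out into independent test runs, and float answers are checked with a fuzzy comparison instead of ==. A minimal self-contained version, with math.isclose standing in for UM.Math.Float.fuzzyCompare:

import math
import pytest

fuzzy_cases = [
    {"a": 0.1 + 0.2, "b": 0.3},
    {"a": 1.0 / 3.0, "b": 0.3333333333},
]

@pytest.mark.parametrize("data", fuzzy_cases)
def test_fuzzy_compare(data):
    # never compare floats with ==; allow a small tolerance
    assert math.isclose(data["a"], data["b"], rel_tol=1e-8, abs_tol=1e-8)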
kuralabs/flowbber
lib/flowbber/plugins/sinks/print.py
Python
apache-2.0
1,764
0
# -*- coding: utf-8 -*- # # Copyright (C) 2017-2018 KuraLabs S.R.L # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Print ===== This sink plugin will pretty print all collected data to ``stdout``. This module uses third party module pprintpp_ for better pretty printing of large data structures. .. _pprintpp: https://github.com/wolever/pprintpp .. important:: This class inherits several inclusion and exclusion configuration options for filtering data before
using it. See :ref:`filter-sink-options` for more information. **Dependencies:** .. code-block:: sh pip3 install flowbber[prin
t] **Usage:** .. code-block:: toml [[sinks]] type = "print" id = "..." .. code-block:: json { "sinks": [ { "type": "print", "id": "...", "config": {} } ] } """ from flowbber.logging import print from flowbber.components import FilterSink class PrintSink(FilterSink): def declare_config(self, config): super().declare_config(config) def distribute(self, data): from pprintpp import pformat # Allow to filter data super().distribute(data) print(pformat(data)) __all__ = ['PrintSink']
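Outside a flowbber pipeline the same behaviour is two lines: pprintpp's pformat mirrors the stdlib pprint API but produces a much more readable layout for wide nested structures. A quick sketch:

from pprintpp import pformat

data = {"sources": [{"id": "timestamp", "data": {"epoch": 1500000000}}]}
print(pformat(data))  # stable, indented rendering of nested collections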
jdepoix/goto_cloud
goto_cloud/commander/public.py
Python
mit
33
0
from .commander import Com
mander
UCSD-PL/kraken
reflex/test/webserver/listener.py
Python
gpl-2.0
678
0.023599
#!/usr/bin/env python2.7 import msg, socket, time import uuid HOST_NAME = 'localhost' # !!!REMEMBER TO CHANGE THIS!!! PORT_NUMBER = 9000 # Maybe set this to 9000. def debug(s): print(" L: " + s) if __name__ == '__main__': msg.init() debug("Spawned")
s = socket.socket() try: s.bind((HOST_NAME, PORT_NUMBER)) print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER) while True: s.listen(0) (bs, addr) = s.accept() debug('Accepting: ' + str(addr)) msg.send(msg.LoginReq, 'default', ' ', str(uuid.uuid4()), bs, bs) except: s.close() print ti
me.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER)
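The listener accepts one TCP connection at a time and immediately pushes a LoginReq over it, so the matching client is just a connect-and-read. A hedged sketch of such a client (the msg framing is project-specific and not decoded here):

import socket

client = socket.socket()
client.connect(('localhost', 9000))  # HOST_NAME / PORT_NUMBER from the listener
data = client.recv(4096)             # raw bytes of the LoginReq frame
print('received %d bytes' % len(data))
client.close()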
lfalcao/thumbor
vows/upload_api_vows.py
Python
mit
26,572
0.002032
#!/usr/bin/python # -*- coding: utf-8 -*- # thumbor imaging service # https://github.com/globocom/thumbor/wiki # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2011 globo.com timehome@corp.globo.com from os.path import abspath, join, dirname, exists import re from pyvows import Vows, expect from tornado_pyvows.context import TornadoHTTPContext from shutil import rmtree from thumbor.app import ThumborServiceApp from thumbor.config import Config from thumbor.importer import Importer from thumbor.context import Context import urllib import hashlib import mimetypes file_storage_root_path = '/tmp/thumbor-vows/storage' file_path = '' ## # Images used for tests : # - valid image : JPEG 620x465, 69.88 KB # - too small image : JPEG 20x20, 822 B # - too weight image : JPEG 300x400, 85.32 KB ## def valid_image(): path = abspath(join(dirname(__file__), u'fixtures/alabama1_ap620é.jpg')) with open(path, 'r') as stream: body = stream.read() return body def too_small_image(): path = abspath(join(dirname(__file__), 'crocodile.jpg')) with open(path, 'r') as stream: body = stream.read() return body def too_weight_image(): path = abspath(join(dirname(__file__), 'fixtures/conselheira_tutelar.jpg')) with open(path, 'r') as stream: body = stream.read() return body if exists(file_storage_root_path): rmtree(file_storage_root_path) ## # Path on file system (filestorage) ## def path_on_filesystem(path): digest = hashlib.sha1(path).hexdigest() return join(file_storage_root_path.rstrip('/'), digest[:2] + '/' + digest[2:]) def encode_multipart_formdata(fields, files): BOUNDARY = 'thumborUploadFormBoundary' CRLF = '\r\n' L = [] for key, value in fields.items(): L.append('--' + BOUNDARY) L.append('Content-Disposition: form-data; name="%s"' % key) L.append('') L.append(value) for (key, filename, value) in files: L.append('--' + BOUNDARY) L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)) L.append('Content-Type: %s' % mimetypes.guess_type(filename)[0] or 'application/octet-stream') L.append('') L.append(value) L.append('') L.append('') L.append('--' + BOUNDARY + '--') body = CRLF.join([str(item) for item in L]) content_type = 'multipart/form-data; boundary=%s' % BOUNDARY return content_type, body ## # Image Context defining post / put / delete / get ## class ImageContext(TornadoHTTPContext): def __init__(self, *args, **kw): super(ImageContext, self).__init__(*args, **kw) self.ignore('get', 'post', 'put', 'delete', 'post_fil
es') self.base_uri = "/image" def get(self, path, headers): return self.fetch(path, method='GET', body=urllib.urlencode({}, doseq=True), headers=
headers, allow_nonstandard_methods=True) def post(self, path, headers, body): return self.fetch(path, method='POST', body=body, headers=headers, allow_nonstandard_methods=True) def put(self, path, headers, body): return self.fetch(path, method='PUT', body=body, headers=headers, allow_nonstandard_methods=True) def delete(self, path, headers): return self.fetch(path, method='DELETE', body=urllib.urlencode({}, doseq=True), headers=headers, allow_nonstandard_methods=True) def post_files(self, path, data={}, files=[]): multipart_data = encode_multipart_formdata(data, files) return self.fetch(path, method='POST', body=multipart_data[1], headers={ 'Content-Type': multipart_data[0] }, allow_nonstandard_methods=True) ## # Upload new images with POST method ## @Vows.batch class PostingANewImage(ImageContext): def get_app(self): self.default_filename = 'image' cfg = Config() cfg.UPLOAD_ENABLED = True cfg.UPLOAD_PHOTO_STORAGE = 'thumbor.storages.file_storage' cfg.FILE_STORAGE_ROOT_PATH = file_storage_root_path cfg.UPLOAD_DELETE_ALLOWED = False cfg.UPLOAD_PUT_ALLOWED = False cfg.UPLOAD_DEFAULT_FILENAME = self.default_filename importer = Importer(cfg) importer.import_modules() ctx = Context(None, cfg, importer) application = ThumborServiceApp(ctx) return application ## # Posting a new image with a filename through the REST API ## class WhenPostingANewImageWithAFilename(ImageContext): def topic(self): self.filename = 'new_image_with_a_filename.jpg' response = self.post(self.base_uri, {'Content-Type': 'image/jpeg', 'Slug': self.filename}, valid_image()) return response class HttpStatusCode(ImageContext): def topic(self, response): return response.code def should_be_201_created(self, topic): expect(topic).to_equal(201) class HttpHeaders(ImageContext): def topic(self, response): return response.headers def should_contain_a_location_header_containing_the_filename(self, headers): expect(headers).to_include('Location') expect(headers['Location']).to_match(self.base_uri + r'/[^\/]{32}/' + self.filename) ## # Posting a new image with a uncommon charset (including charset) through the REST API ## class WhenPostingANewImageWithCharsetInContentType(ImageContext): def topic(self): self.filename = self.default_filename + '.jpg' response = self.post(self.base_uri, {'Content-Type': 'image/jpeg;charset=UTF-8'}, valid_image()) return response class HttpStatusCode(ImageContext): def topic(self, response): return response.code def should_be_201_created(self, topic): expect(topic).to_equal(201) class HttpHeaders(ImageContext): def topic(self, response): return response.headers def should_contain_a_location_header_containing_the_filename(self, headers): expect(headers).to_include('Location') expect(headers['Location']).to_match(self.base_uri + r'/[^\/]{32}/' + self.filename) class Image(ImageContext): def topic(self, response): return re.compile(self.base_uri + r'/([^\/]{32})/' + self.filename).search( response.headers['Location']).group(1) def should_be_store_at_right_path(self, topic): path = path_on_filesystem(topic) expect(exists(path)).to_be_true() ## # Posting a new valid image with a unknown charset through the REST API ## class WhenPostingANewValidImageWithUnknwonCharsetInContentType(ImageContext): def topic(self): self.filename = self.default_filename + '.jpg' response = self.post(self.base_uri, {'Content-Type': 'image/thisIsAUnknwonOrBadlyFormedCHarset'}, valid_image()) return response class HttpStatusCode(ImageContext): def topic(self, response): return response.code def should_be_201_created(self, topic): 
expect(topic).to_equal(201) class HttpHeaders(ImageContext): def topic(self, response): return response.headers def should_contain_a_location_header_containing_the_filename(self, headers): expect(headers).to_include('Location')
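encode_multipart_formdata is the reusable piece of this harness: it hand-assembles an RFC 2388 body around a fixed boundary. A usage sketch independent of the vows classes (field and file names are invented):

content_type, body = encode_multipart_formdata(
    {'title': 'my upload'},
    [('media', 'photo.jpg', open('photo.jpg', 'rb').read())],
)
# content_type == 'multipart/form-data; boundary=thumborUploadFormBoundary'
# body can now be POSTed with that exact Content-Type header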
mlperf/training_results_v0.7
Google/benchmarks/gnmt/implementations/gnmt-research-TF-tpu-v4-512/utils/vocab_utils.py
Python
apache-2.0
6,145
0.009276
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utility to handle vocabularies.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import codecs import os import tensorflow.compat.v1 as tf from REDACTED.tensorflow.python.ops import lookup_ops from REDACTED.nmt.utils import misc_utils as utils # word level special token UNK = "<unk>" SOS = "<s>" EOS = "</s>" UNK_ID = 0 # char ids 0-255 come from utf-8 encoding bytes # assign 256-300 to special chars BOS_CHAR_ID = 256 # <begin sentence> EOS_CHAR_ID = 257 # <end sentence> BOW_CHAR_ID = 258 # <begin word> EOW_CHAR_ID = 259 # <end word> PAD_CHAR_ID = 260 # <padding> DEFAULT_CHAR_MAXLEN = 50 # max number of chars for each word. def _string_to_bytes(text, max_length): """Given string and length, convert to byte seq of at most max_length. This process mimics docqa/elmo's preprocessing: https://github.com/allenai/document-qa/blob/master/docqa/elmo/data.py Note that we make use of BOS_CHAR_ID and EOS_CHAR_ID in iterator_utils.py & our usage differs from docqa/elmo. Args: text: tf.string tensor of shape [] max_length: max number of chars for each word. Returns: A tf.int32 tensor of the byte encoded text. """ byte_ids = tf.to_int32(tf.decode_raw(text, tf.uint8)) byte_ids = byte_ids[:max_length - 2] padding = tf.fill([max_length - tf.shape(byte_ids)[0] - 2], PAD_CHAR_ID) byte_ids = tf.concat( [[BOW_CHAR_ID], byte_ids, [EOW_CHAR_ID], padding], axis=0) tf.logging.info(byte_ids) byte_ids = tf.reshape(byte_ids, [max_length]) tf.logging.info(byte_ids.get_shape().as_list()) return byte_ids + 1 def tokens_to_bytes(tokens): """Given a sequence of strings, map to sequence of bytes. Args: tokens: A tf.st
ring tensor Returns: A tensor of shape words.shape + [bytes_per_word] containing byte versions of each word. """ bytes_per_word = DEFAULT_CHAR_MAXLEN with tf.device("/cpu:0"): tf.assert_rank(tokens, 1)
shape = tf.shape(tokens) tf.logging.info(tokens) tokens_flat = tf.reshape(tokens, [-1]) as_bytes_flat = tf.map_fn( fn=lambda x: _string_to_bytes(x, max_length=bytes_per_word), elems=tokens_flat, dtype=tf.int32, back_prop=False) tf.logging.info(as_bytes_flat) as_bytes = tf.reshape(as_bytes_flat, [shape[0], bytes_per_word]) return as_bytes def load_vocab(vocab_file): vocab = [] with codecs.getreader("utf-8")(tf.gfile.GFile(vocab_file, "rb")) as f: vocab_size = 0 for word in f: vocab_size += 1 vocab.append(word.strip()) return vocab, vocab_size def check_vocab(vocab_file, out_dir, check_special_token=True, sos=None, eos=None, unk=None): """Check if vocab_file doesn't exist, create from corpus_file.""" if tf.gfile.Exists(vocab_file): utils.print_out("# Vocab file %s exists" % vocab_file) vocab, vocab_size = load_vocab(vocab_file) if check_special_token: # Verify if the vocab starts with unk, sos, eos # If not, prepend those tokens & generate a new vocab file if not unk: unk = UNK if not sos: sos = SOS if not eos: eos = EOS assert len(vocab) >= 3 if vocab[0] != unk or vocab[1] != sos or vocab[2] != eos: utils.print_out("The first 3 vocab words [%s, %s, %s]" " are not [%s, %s, %s]" % (vocab[0], vocab[1], vocab[2], unk, sos, eos)) vocab = [unk, sos, eos] + vocab vocab_size += 3 new_vocab_file = os.path.join(out_dir, os.path.basename(vocab_file)) with codecs.getwriter("utf-8")( tf.gfile.GFile(new_vocab_file, "wb")) as f: for word in vocab: f.write("%s\n" % word) vocab_file = new_vocab_file else: raise ValueError("vocab_file '%s' does not exist." % vocab_file) vocab_size = len(vocab) return vocab_size, vocab_file def create_vocab_tables(src_vocab_file): """Creates vocab tables for src_vocab_file and tgt_vocab_file.""" src_vocab_table = lookup_ops.index_table_from_file( src_vocab_file, default_value=UNK_ID) tgt_vocab_table = src_vocab_table return src_vocab_table, tgt_vocab_table def load_embed_txt(embed_file): """Load embed_file into a python dictionary. Note: the embed_file should be a Glove/word2vec formatted txt file. Here is an example assuming embed_size=5: the -0.071549 0.093459 0.023738 -0.090339 0.056123 to 0.57346 0.5417 -0.23477 -0.3624 0.4037 and 0.20327 0.47348 0.050877 0.002103 0.060547 For word2vec format, the first line will be: <num_words> <emb_size>. Args: embed_file: file path to the embedding file. Returns: a dictionary that maps word to vector, and the size of embedding dimensions. """ emb_dict = dict() emb_size = None is_first_line = True with codecs.getreader("utf-8")(tf.gfile.GFile(embed_file, "rb")) as f: for line in f: tokens = line.rstrip().split(" ") if is_first_line: is_first_line = False if len(tokens) == 2: # header line emb_size = int(tokens[1]) continue word = tokens[0] vec = list(map(float, tokens[1:])) emb_dict[word] = vec if emb_size: if emb_size != len(vec): utils.print_out( "Ignoring %s since embedding size is inconsistent." % word) del emb_dict[word] else: emb_size = len(vec) return emb_dict, emb_size
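_string_to_bytes packs each word as begin-word marker, UTF-8 bytes, end-word marker, then padding, and finally shifts every id by +1 so that 0 stays free as a mask value. The same layout in pure Python, reusing the module's constants (a sketch for inspection, not for the TF graph):

def string_to_byte_ids(word, max_length=DEFAULT_CHAR_MAXLEN):
    byte_ids = list(bytearray(word.encode('utf-8')))[:max_length - 2]
    padding = [PAD_CHAR_ID] * (max_length - len(byte_ids) - 2)
    return [i + 1 for i in [BOW_CHAR_ID] + byte_ids + [EOW_CHAR_ID] + padding]

# string_to_byte_ids(u'cat', max_length=8)
# -> [259, 100, 98, 117, 260, 261, 261, 261]
#    BOW,  'c', 'a', 't', EOW, PAD, PAD, PAD  (each shifted by +1)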
kosugawala/jubatus-weather
python/jubaweather/main.py
Python
mit
2,275
0.024176
import argparse import yaml from jubatus.classifier.client import classifier from jubatus.classifier.types import * from jubaweather.version import get_version def parse_options(): parser = argparse.ArgumentParser() parser.add_argument( '-a', required = True, help = 'analyze data file (YAML)', metavar = 'FILE', dest = 'analyzedata' ) parser.add_argument( '-t', help = 'train data file (CSV)', metavar = 'FILE', dest = 'traindata' ) parser.add_argument( '-v', '--version', action = 'version', version = '%(prog)s ' + get_version() ) return parser.parse_args() def main(): args = parse_options() client = classifier('127.0.0.1', 9199) # train num = 0 if args.traindata: with open(args.traindata, 'rU') as traindata: for data in traindata: # skip comments if not len(data) or data.startswith('#'): continue num += 1 season, avetemp, maxtemp, mintemp, pressure, humidity = map(str.strip, data.strip().split(',')) num_values = [ [
'avetemp', float(avetemp)], ['maxtemp', float(maxtemp)], ['mintemp', float(mintemp)], ['pressure', float(pressure)], ['humidity', float(humidity)] ] d = datum([], num_values) train_data = [[season, d]] # train client.train('', train_data) # print train number print 'train ...', num # save train model print "save :", client.save('', "we
ather") # anaylze with open(args.analyzedata, 'r') as analyzedata: weather = yaml.load(analyzedata) for k, v in weather.iteritems(): print str(k), "(", str(v['season']), ")" num_values = [ ['avetemp', float(v['avetemp'])], ['maxtemp', float(v['maxtemp'])], ['mintemp', float(v['mintemp'])], ['pressure', float(v['pressure'])], ['humidity', float(v['humidity'])] ] d = datum([], num_values) analyze_data = [d] results = client.classify('', analyze_data) results[0].sort(key=lambda x: x.score, reverse=True) for result in results: for i in range(5): print result[i].label, result[i].score print
paulmartel/voltdb
examples/metrocard/exportServer.py
Python
agpl-3.0
6,191
0.004361
#!/usr/bin/env python

# This file is part of VoltDB.
# Copyright (C) 2008-2016 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.

"""
Simple web server

Usage: python exportServer.py [port=<listener port>]

method "exportRows":
    receives rows from the VoltDB server via "GET".
    processes according to "notify" preference in the row
    updates queue of last N notifications, either text or email

method "htmlRows":
    returns a static html page of latest MAXROWS exported rows

The request contains the row, URL-encoded and packed like this example row:
"CardId=161771&ExportTime=1429897821439&StationName=Ruggles&Name=J%20Ryder%20161771&Phone=6174567890&Email=jryder%40gmail.com&Notify=0&AlertMessage=Insufficient%20Balance"

Basic web server fragments from http://lucumr.pocoo.org/2007/5/21/getting-started-with-wsgi/
"""

import sys, os
from time import ctime
from urlparse import parse_qs
try:
    from twilio.rest import TwilioRestClient
except ImportError:
    raise ImportError("Add Twilio module if you want to try sending text or email messages on export \"alerts\"\nOtherwise remove this import and method below.")
from wsgiref.simple_server import make_server


def checkPhoneNum(phone):
    # Twilio wants a leading country code; compare the first two characters
    # so numbers already prefixed with "+1" are left alone
    if len(phone) > 2 and phone[0:2] != "+1":
        return "+1" + phone
    else:
        return phone


def sendSMS(toPhone):
    # Account Sid and Auth Token from twilio.com/user/account
    # ACCOUNT SID
    accountSid = "AC543483989cdc36a28dc297572e096b1a"
    # AUTH TOKEN
    authToken = "6ccf0793c2b5d70424a0f2dffeb7e7e2"
    client = TwilioRestClient(accountSid, authToken)
    message = client.messages.create(body="Twilio test message <3",
                                     to=checkPhoneNum(toPhone),
                                     from_="+16173963192")  # My Twilio number
    print message.sid


def processRow(row):
    """
    For each row, check notify flag and process accordingly:
        notify == 0: no notification
        notify == 1: email
        notify == 2: text message to phone number
    """
    try:
        # print "Rider %s, \"%s\" at station %s: " % (row["Name"][0], row["AlertMessage"][0], row["StationName"][0]),
        if row["Notify"][0] == "1":
            # sendEmail(row["Email"][0])
            updateHTML(ctime(int(row["ExportTime"][0])/1000), row["Name"][0], row["Email"][0], "Complete")
            # print "email sent to %s" % row["Email"][0]
        elif row["Notify"][0] == "2":
            # sendSMS(row["Phone"][0])
            updateHTML(ctime(int(row["ExportTime"][0])/1000), row["Name"][0], row["Phone"][0], "Complete")
            # print "text message sent to %s" % row["Phone"][0]
        else:
            print "Not notified"
    except KeyError as err:
        print "Exception: key %s not in GET query string."
% err except: print "Unknown exception in processRow" def htmlRows(environ, start_response): start_response('200 OK', [('Content-Type', 'text/html')]) return assembleTable() rowQueue = [] MAXROWS = 10 def assembleTable(): """ Create an html page with latest N rows """ table = "<html><body><meta http-equiv=\"refresh\" content=\"5\"><table border=\"1\"><tr><th>Event Time</th><th>Name</th><th>Contact</th><th>Status</th></tr>" # print "RowQueue: " + str(rowQueue) for r in rowQueue: table += r table += "</table></body></html>" # print "Table: " + str(table) return table def updateHTML(time, name, contact, status): """ Keep a queue of last 10 exported rows """ if len(rowQueue) >= MAXROWS: dc = rowQueue.pop(0) rowQueue.append("<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>" % (time, name, contact, status)) def htmlRows(env
iron, start_response): start_response('200 OK', [('Content-Type', 'text/html')])
return assembleTable() def exportRows(environ, start_response): start_response('200 OK', [('Content-Type', 'text/html')]) if "QUERY_STRING" in environ: row = parse_qs(environ["QUERY_STRING"]) processRow(row) return [] getFuncs = { "htmlRows" : htmlRows, "exportRows" : exportRows, } def application(environ, start_response): """ The main WSGI application. Dispatch the current request to the functions either "exportRows", the VoltDB export endpoint, or to "htmlRows", the source static html of recent notifications. If neither, call the `not_found` function. """ path = environ.get('PATH_INFO', '').lstrip('/') # print "Path: " + path if path in getFuncs: return getFuncs[path](environ, start_response) return not_found(environ, start_response) def not_found(environ, start_response): """Called if no URL matches.""" start_response('404 NOT FOUND', [('Content-Type', 'text/plain')]) return ['Not Found'] if __name__ == '__main__': if len(sys.argv) > 1 and "port" in sys.argv[1]: port = sys.argv[1].split("=")[1] else: port = 8083 try: httpd = make_server('', int(port), application) print('Serving on port %s...' % str(port)) httpd.serve_forever() except KeyboardInterrupt: print('Goodbye.')
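# Sanity check of the row format documented in the module docstring; the query
# string is the example given there, decoded with the same parse_qs call the
# server uses.
from urlparse import parse_qs

_example = ("CardId=161771&ExportTime=1429897821439&StationName=Ruggles"
            "&Name=J%20Ryder%20161771&Phone=6174567890&Email=jryder%40gmail.com"
            "&Notify=0&AlertMessage=Insufficient%20Balance")
_row = parse_qs(_example)
assert _row["StationName"] == ["Ruggles"]   # parse_qs maps each key to a list
assert _row["Name"] == ["J Ryder 161771"]   # %20 decodes to spaces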
ksavenkov/recsys-001
recsys/dataset.py
Python
mit
6,782
0.008552
import csv from collections import defaultdict class DataIO: '''Responsible for reading data from whatever source (CSV, DB, etc), normalizing indexes for processing and writing results in denormalized form. Also it provides an access to translation dictionaries between external and internal ids. Prettyprinting of the recommendation results is performed using special Printer classes supplied to the DataIO. ''' def __init__(self, verbose = True): self.__verbose = verbose self.ratings = [] # [(user, item, rating)] self.item_tags = [] # {(user, tag, count)} def load(self, ratings_file, tags_file = None, items_file = None): '''Loads the data from a proper source, and performs index normalization.''' self.__read_ratings(ratings_file) if tags_file: self.__read_tags(tags_file) if items_file: self.__read_titles(items_file) self.__normalize() re
turn def translate_users(self, old_user_ids): '''Takes an array of original user ids and translates them into the normalized form ''' return [self.new_user_idx(i) for i in old_user_ids] def translate_items(self, old_item_ids):
'''Takes an array of original item ids
           and translates them into the normalized form
        '''
        return [self.new_item_idx(i) for i in old_item_ids]

    def num_items(self):
        '''Number of different items in the dataset'''
        return len(self.__old_item_idx)

    def num_users(self):
        '''Number of different users in the dataset'''
        return len(self.__old_user_idx)

    def num_tags(self):
        '''Number of different tags in the dataset'''
        return len(self.__item_tags)

    def old_item_idx(self, idx):
        '''New to old index conversion.'''
        return self.__old_item_idx[idx]

    def new_item_idx(self, idx):
        '''Old to new index conversion.'''
        return self.__new_item_idx[idx]

    def old_user_idx(self, idx):
        '''New to old index conversion.'''
        return self.__old_user_idx[idx]

    def new_user_idx(self, idx):
        '''Old to new index conversion.'''
        return self.__new_user_idx[idx]

    def tags(self, idx):
        '''Get tag by index.'''
        return self.__item_tags[idx]

    def title(self, idx):
        '''Get title by (old) index.'''
        return self.item_titles[idx]

    def tag_idx(self, tag):
        '''Get tag index.'''
        return self.__item_tag_idx[tag]

    def __log(self, msg):
        if self.__verbose:
            print msg

    def __read_ratings(self, filename):
        self.__log('Reading (user_id,item_id,rating) tuples from %s' % filename)
        with open(filename, 'rbU') as csvfile:
            csv_reader = csv.reader(csvfile, delimiter=',')
            self.ratings = [(int(r[0]), int(r[1]), float(r[2])) for r in csv_reader]
            self.__log('\tread %d entries' % len(self.ratings))
        csvfile.close()
        return

    def __read_tags(self, filename):
        self.__log('Reading (item_id,tag) tuples from %s' % filename)
        with open(filename, 'rbU') as csvfile:
            csv_reader = csv.reader(csvfile, delimiter=',')
            tagdict = defaultdict(int)
            # aggregate item-tag pairs
            for (item, tag) in csv_reader:
                tagdict[(int(item), tag)] += 1
            # turn it into a list
            self.item_tags = [(i, t, c) for ((i, t), c) in tagdict.items()]
            self.__log('\tread %d entries' % len(self.item_tags))
        csvfile.close()

    def __read_titles(self, filename):
        self.__log('Reading (item_id,title) tuples from %s' % filename)
        with open(filename, 'rbU') as csvfile:
            self.item_titles = dict()
            csv_reader = csv.reader(csvfile, delimiter=',')
            # map each item id to its title
            for (item, title) in csv_reader:
                self.item_titles[int(item)] = title
            self.__log('\tread %d entries' % len(self.item_titles.keys()))
        csvfile.close()

    def __normalize(self):
        '''Normalize the data by creating custom user and item indexes'''
        # collect all user, item and tag values
        (users_rated, items_rated) = zip(*self.ratings)[:2]
        (items_tagged, tags) = zip(*self.item_tags)[:2] if self.item_tags else ([], [])
        # normalize all of them by creating conversion dictionaries
        (self.__old_user_idx, self.__new_user_idx) = self.__normalize_idx(users_rated)
        (self.__old_item_idx, self.__new_item_idx) = self.__normalize_idx(items_rated, items_tagged)
        (self.__item_tags, self.__item_tag_idx) = self.__normalize_idx(tags)
        # translate everything into normalized indexes
        # TODO: inplace change may be more efficient
        self.ratings = [(self.__new_user_idx[u], self.__new_item_idx[i], r)
                        for (u, i, r) in self.ratings]
        self.item_tags = [(self.__new_item_idx[i], self.__item_tag_idx[t], c)
                          for (i, t, c) in self.item_tags]

    def __normalize_idx(self, *idx_list):
        '''idx_list is a list of indexes with duplicates and gaps
           returns an array of old indexes and a dict old_idx -> new_idx
           two arrays is due to dict.keys() are unsorted
        '''
        # 1. merge value lists, remove duplicates and sort
        old_idx = sorted(set().union(*idx_list))
        # 2.
add new normalized index and put to a dict return old_idx, dict(zip(old_idx,range(len(old_idx)))) def print_recs(self, recs, given_items = None, given_users = None, printer = None): '''Stringifies recommendations along with given items or users, after translating them into original index system. It takes recommendations and given_items or given users in normalized form, translate them to the original indexes and passes to the printer function. Depending of what was passed, either given_users, or given_items, or None is passed to the printer function as the first argument. ''' # translate given objects to the original indexes given = [self.old_user_idx(u) for u in given_users] if given_users else [self.old_item_idx(i) for i in given_items] if given_items else None # translate recommended items to the original indexes recs = [[(self.old_item_idx(i),s) for i,s in r] for r in recs] return printer(given, recs) if printer else default_printer(given, recs) def default_printer(given, recs): return '\n'.join(['%d: ' % i + ', '.join(['(%d,%.2f)' % (j,s) for j,s in r]) for i,r in zip(given,recs)])
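# A short usage sketch of the index round-trip DataIO provides; the file
# names here are placeholders, not files that ship with the project.
data = DataIO(verbose=False)
data.load('ratings.csv', tags_file='tags.csv', items_file='titles.csv')
for _new_idx in range(data.num_users()):
    # new indexes are dense 0..N-1; original ids round-trip through the maps
    assert data.new_user_idx(data.old_user_idx(_new_idx)) == _new_idx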
ohmini/thaifoodapi
thaifood/models.py
Python
bsd-3-clause
6,666
0.00063
##-*-coding: utf-8 -*- from django.db import models from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class Usage(models.Model): ip = models.CharField(max_length=50) method = models.CharField(max_length=3) path = models.CharField(max_length=100) params = models.CharField(max_length=255) def __str__(self): return self.ip @python_2_unicode_compatible class Element(models.Model): name = models.CharField(max_length=10) code = models.CharField(max_length=10) def __str__(self): return self.name class Meta: verbose_name = "ธาตุ" verbose_name_plural = "ธาตุต่างๆ" db_table = 'element' @python_2_unicode_compatible class Disease(models.Model): name = models.CharField(max_length=100, unique=True) description = models.CharField(max_length=255, null=True) is_congenital = models.BooleanField(default=False) created_by = models.CharField(max_length=50, null=True) created_date = models.DateTimeField(auto_now_add=True) last_modified = models.DateTimeField(auto_now=True, null=True) last_modified_by = models.CharField(max_length=30, null=True, blank=True) def __str__(self): return self.name class Meta: verbose_name = "เชื้อโรค" verbose_name_plural = "กลุ่มเชื้อโรค" db_table = 'disease' class Nutrient(models.Model): water = models.DecimalField(max_digits=14, decimal_places=4) protein = models.DecimalField(max_digits=14, decimal_places=4) fat = models.DecimalField(max_digits=14, decimal_places=4) carbohydrate = models.DecimalField(max_digits=14, decimal_places=4) dietary_fiber = models.DecimalField(max_digits=14, decimal_places=4) ash = models.DecimalField(max_digits=14, decimal_places=4) calcium = models.DecimalField(max_digits=14, decimal_places=4) phosphorus = models.DecimalField(max_digits=14, decimal_places=4) iron = models.DecimalField(max_digits=14, decimal_places=4) retinol = models.DecimalField(max_digits=14, decimal_places=4) beta_carotene = models.DecimalField(max_digits=14, decimal_places=4) vitamin_a = models.DecimalField(max_digits=14, decimal_places=4) vitamin_e = models.DecimalField(max_digits=14, decimal_places=4) thiamin = models.DecimalField(max_digits=14, decimal_places=4) riboflavin = models.DecimalField(max_digits=14, decimal_places=4) niacin = models.DecimalField(max_digits=14, decimal_places=4) vitamin_c = models.DecimalField(max_digits=14, decimal_places=4) def __str__(self): return 'id: ' + str(self._get_pk_val()) class Meta: verbose_name = "สารอาหาร" verbose_name_plural = "กลุ่มสารอาหาร" db_table = 'nutrient' @python_2_unicode_compatible class IngredientCategory(models.Model): name = models.CharField(max_length=50, unique=True) created_by = models.CharField(max_length=50) created_date = models.DateTimeField(auto_now_add=True) last_modified = models.DateTimeField(auto_now=True, null=True) last_modified_by = models.CharField(max_length=30, null=True, blank=True) def __str__(self): return self.name class Meta: verbose_name = "หมวดหมู่วัตถุดิบ" verbose_name_plural = "กลุ่มหมวดหมู่วัตถุดิบ" db_table = 'ingredient_type' @python_2_unicode_compatible class FoodCategory(models.Model): name = models.CharField(max_length=50, unique=True) created_by = models.CharField(max_length=50) created_date = models.DateTimeField(auto_now_add=True) last_modified = models.DateTimeField(auto_now=True, null=True) last_modified_by = models.CharField(max_length=30, null=True, blank=True) def __str__(self): return self.name class Meta: verbose_name = "หมวดหม
ู่อาหาร" verbose_name_plural = "กลุ่มหมวดหมู่อาหาร" db_table = 'food_type' @python_2_unicod
e_compatible class Ingredient(models.Model): name = models.CharField(max_length=100, unique=True) description = models.CharField(max_length=255, blank=True, null=True) calories = models.IntegerField(default=0) nutrient = models.ForeignKey(Nutrient, on_delete=models.SET_NULL, blank=True, null=True) element = models.ForeignKey(Element, on_delete=models.SET_NULL, blank=True, null=True) category = models.ManyToManyField(IngredientCategory, blank=True) healing = models.ManyToManyField(Disease, related_name="healing", blank=True) affect = models.ManyToManyField(Disease, related_name="affect", blank=True) code = models.IntegerField(default=0) def __str__(self): return self.name class Meta: verbose_name = "วัตถุดิบ" verbose_name_plural = "กลุ่มวัตถุดิบ" db_table = 'ingredient' @python_2_unicode_compatible class Food(models.Model): name = models.CharField(max_length=100, unique=True) description = models.CharField(max_length=255, blank=True, null=True, default="") calories = models.IntegerField(default=0) nutrient = models.ForeignKey(Nutrient, on_delete=models.SET_NULL, blank=True, null=True) ingredients = models.ManyToManyField(Ingredient, through='Menu') category = models.ManyToManyField(FoodCategory) created_by = models.CharField(max_length=50, default="") created_date = models.DateTimeField(auto_now_add=True) last_modified = models.DateTimeField(auto_now=True, null=True) last_modified_by = models.CharField(max_length=30, null=True, blank=True) code = models.IntegerField(default=0) def __str__(self): return self.name class Meta: verbose_name = "อาหาร" verbose_name_plural = "กลุ่มอาหาร" db_table = 'food' class Menu(models.Model): food = models.ForeignKey(Food, on_delete=models.CASCADE) ingredient = models.ForeignKey(Ingredient, on_delete=models.CASCADE) weight = models.DecimalField(max_digits=14, decimal_places=4) name = models.CharField(max_length=100, blank=True, default="") class Meta: db_table = 'menu'
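# Hypothetical query sketch (function and object names invented): the
# through='Menu' relation keeps the per-ingredient weight on the intermediate
# model, so recipes are read via Menu rather than food.ingredients directly.
def print_recipe(food_name):
    food = Food.objects.get(name=food_name)
    for menu in Menu.objects.filter(food=food).select_related('ingredient'):
        print("%s: %s" % (menu.ingredient.name, menu.weight))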
hmgaudecker/econ-project-templates
{{cookiecutter.project_slug}}/.mywaflib/waflib/extras/fc_solstudio.py
Python
bsd-3-clause
1,642
0.028015
#! /usr/bin/env python # encoding: utf-8 # harald at klimachs.de import re from waflib import Utils from waflib.Tools import fc,fc_config,fc_scan from waflib.Configure import conf from waflib.Tools.compiler_fc import fc_compiler fc_compiler['linux'].append('fc_solstudio') @conf def find_solstudio(conf): """Find the Solaris Studio compiler (will look in the environment variable 'FC')""" fc = conf.find_program(['sunf95', 'f95', 'sunf90', 'f90'], var='FC') conf.get_solstudio_version(fc) conf.env.FC_NAME = 'SOL' @conf def solstudio_flags(conf): v = conf.env v['FCFLAGS_fcshlib'] = ['-Kpic'] v['FCFLAGS_DEBUG'] = ['-w3'] v['LINKFLAGS_fcshlib'] = ['-G'] v['FCSTLIB_MARKER'] = '-Bstatic' v['FCSHLIB_MARKER'] = '-Bdynamic' v['SONAME_ST'] = '-h %s' @conf def solstudio_modifier_platform(conf): des
t_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform() solstudio_modifier_func = getattr(conf, 'solstudio_modifier_' + dest_os, None) if solstudio_modifier_func: solstudio_modifier_func() @conf def get_solstudio_version(conf, fc): """Get the compiler version""" version_re = re.compile(r"Sun Fortran 95 *(?P<major>\d*)\.(?P<minor>\d*)", re.I).search cmd = fc + ['-V'] out, err = fc_config.getoutput(conf,cmd,stdin=False) if out: match = version_re(out
) else: match = version_re(err) if not match: conf.fatal('Could not determine the Sun Studio Fortran version.') k = match.groupdict() conf.env['FC_VERSION'] = (k['major'], k['minor']) def configure(conf): conf.find_solstudio() conf.find_ar() conf.fc_flags() conf.fc_add_flags() conf.solstudio_flags() conf.solstudio_modifier_platform()
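# Minimal wscript sketch showing how a build might pull this tool in; the
# explicit load() call is an assumption — the fc_compiler registration above
# also lets waf try it automatically on Linux.
def configure(conf):
	conf.load('fc_solstudio')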
codendev/rapidwsgi
src/mako/pygen.py
Python
gpl-3.0
10,042
0.008365
# pygen.py # Copyright (C) 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """utilities for generating and formatting literal Python code.""" import re, string from StringIO import StringIO from mako import exceptions class PythonPrinter(object): def __init__(self, stream): # indentation counter self.indent = 0 # a stack storing information about why we incremented # the indentation counter, to help us determine if we # should decrement it self.indent_detail = [] # the string of whitespace multiplied by the indent # counter to produce a line self.indentstring = " " # the stream we are writing to self.stream = stream # a list of lines that represents a buffered "block" of code, # which can be later printed relative to an indent level self.line_buffer = [] self.in_indent_lines = False self._reset_multi_line_flags() def write(
self, text): self.stream.write(text) def write_indented_block(self, block): """print a line or lines of python which already contain indentation. The indentation of the total block of lines will be adjusted to that of the current indent level.""" self.in_indent_lines = False for l in re.split(r'\r?\n', block): self.line_buffer.append(l) def writelines(self, *lines): """print
a series of lines of python.""" for line in lines: self.writeline(line) def writeline(self, line): """print a line of python, indenting it according to the current indent level. this also adjusts the indentation counter according to the content of the line.""" if not self.in_indent_lines: self._flush_adjusted_lines() self.in_indent_lines = True decreased_indent = False if (line is None or re.match(r"^\s*#",line) or re.match(r"^\s*$", line) ): hastext = False else: hastext = True is_comment = line and len(line) and line[0] == '#' # see if this line should decrease the indentation level if (not decreased_indent and not is_comment and (not hastext or self._is_unindentor(line)) ): if self.indent > 0: self.indent -=1 # if the indent_detail stack is empty, the user # probably put extra closures - the resulting # module wont compile. if len(self.indent_detail) == 0: raise exceptions.SyntaxException("Too many whitespace closures") self.indent_detail.pop() if line is None: return # write the line self.stream.write(self._indent_line(line) + "\n") # see if this line should increase the indentation level. # note that a line can both decrase (before printing) and # then increase (after printing) the indentation level. if re.search(r":[ \t]*(?:#.*)?$", line): # increment indentation count, and also # keep track of what the keyword was that indented us, # if it is a python compound statement keyword # where we might have to look for an "unindent" keyword match = re.match(r"^\s*(if|try|elif|while|for)", line) if match: # its a "compound" keyword, so we will check for "unindentors" indentor = match.group(1) self.indent +=1 self.indent_detail.append(indentor) else: indentor = None # its not a "compound" keyword. but lets also # test for valid Python keywords that might be indenting us, # else assume its a non-indenting line m2 = re.match(r"^\s*(def|class|else|elif|except|finally)", line) if m2: self.indent += 1 self.indent_detail.append(indentor) def close(self): """close this printer, flushing any remaining lines.""" self._flush_adjusted_lines() def _is_unindentor(self, line): """return true if the given line is an 'unindentor', relative to the last 'indent' event received.""" # no indentation detail has been pushed on; return False if len(self.indent_detail) == 0: return False indentor = self.indent_detail[-1] # the last indent keyword we grabbed is not a # compound statement keyword; return False if indentor is None: return False # if the current line doesnt have one of the "unindentor" keywords, # return False match = re.match(r"^\s*(else|elif|except|finally).*\:", line) if not match: return False # whitespace matches up, we have a compound indentor, # and this line has an unindentor, this # is probably good enough return True # should we decide that its not good enough, heres # more stuff to check. #keyword = match.group(1) # match the original indent keyword #for crit in [ # (r'if|elif', r'else|elif'), # (r'try', r'except|finally|else'), # (r'while|for', r'else'), #]: # if re.match(crit[0], indentor) and re.match(crit[1], keyword): return True #return False def _indent_line(self, line, stripspace = ''): """indent the given line according to the current indent level. 
stripspace is a string of space that will be truncated from the start of the line before indenting.""" return re.sub(r"^%s" % stripspace, self.indentstring * self.indent, line) def _reset_multi_line_flags(self): """reset the flags which would indicate we are in a backslashed or triple-quoted section.""" (self.backslashed, self.triplequoted) = (False, False) def _in_multi_line(self, line): """return true if the given line is part of a multi-line block, via backslash or triple-quote.""" # we are only looking for explicitly joined lines here, # not implicit ones (i.e. brackets, braces etc.). this is just # to guard against the possibility of modifying the space inside # of a literal multiline string with unfortunately placed whitespace current_state = (self.backslashed or self.triplequoted) if re.search(r"\\$", line): self.backslashed = True else: self.backslashed = False triples = len(re.findall(r"\"\"\"|\'\'\'", line)) if triples == 1 or triples % 2 != 0: self.triplequoted = not self.triplequoted return current_state def _flush_adjusted_lines(self): stripspace = None self._reset_multi_line_flags() for entry in self.line_buffer: if self._in_multi_line(entry): self.stream.write(entry + "\n") else: entry = entry.expandtabs() if stripspace is None and re.search(r"^[ \t]*[^# \t]", entry): stripspace = re.match(r"^([ \t]*)", entry).group(1) self.stream.write(self._indent_line(entry, stripspace) + "\n") self.line_buffer = [] self._reset_multi_line_flags() def adjust_white
plac-lab/TMIIaTest
Software/Control/src/fungen_ctrl.py
Python
mit
2,433
0.006165
#!/usr/bin/env python # -*- coding: utf-8 -*- ## @package fungen_ctrl # Control the function generator # from __future__ import print_function import time import os import sys import shutil import math # use either usbtmc or NI Visa try: import usbtmc except: import visa ## Rigol DG1022 class DG1022(object): ## @var handle to instrument _instr = None ## Initialization def __init__(self): try: rm = visa.ResourceManager() rm.list_resources() # list available instruments self._instr = rm.open_resource('USB0::0x1AB1::0x0588::DG1D131402088::INSTR') except: self._instr = usbtmc.Instrument(0x1ab1, 0x0588) # RIGOL TECHNOLOGIES,DG1022 ,DG1D131402088 self._instr.timeout = 10 ## Generate tail-pulse and write into instrument's memory # @param xp number of samples before the edge # @param np total number of samples for the pulse # @param alpha exp-decay coefficient in exp(-alpha * (i - xp)) def setup_tail_pulse(self, freq=100, xp=16, np=1024, alpha=0.01): self._instr.write("FUNC USER") time.sleep(0.5) self._instr.write("FREQ %g" % freq)
time.sleep(0.5)
        amax = 16383
        vals = [0 for i in xrange(np)]
        for i in xrange(np):
            if i < xp:
vals[i] = amax else: vals[i] = int(amax*(1-math.exp(-(i-xp)*alpha))) string = "DATA:DAC VOLATILE" for i in xrange(np): string += (",%d"% vals[i]) self._instr.write(string) time.sleep(1.0) self._instr.write("FUNC:USER VOLATILE") time.sleep(0.5) def set_frequency(self, freq=100.0): self._instr.write("FREQ %g" % freq) time.sleep(0.5) def set_voltage(self, vLow=0.0, vHigh=1.0): self._instr.write("VOLT:UNIT VPP") time.sleep(0.5) self._instr.write("VOLTage:LOW %g" % vLow) time.sleep(0.5) self._instr.write("VOLTage:HIGH %g" % vHigh) time.sleep(0.5) def turn_on_output(self): self._instr.write("OUTP:LOAD 50") time.sleep(0.5) self._instr.write("OUTP ON") time.sleep(0.5) def close(self): self._instr.close() if __name__ == "__main__": fg = DG1022() fg.set_voltage(0.0, 0.1) fg.setup_tail_pulse(100, 64, 1024, 0.01) fg.turn_on_output() fg.close()
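# Offline sanity check of the waveform setup_tail_pulse uploads; pure math,
# no instrument needed. Parameter names and defaults mirror the method above.
import math

def tail_pulse_samples(xp=16, n=1024, alpha=0.01, amax=16383):
    # Flat top for the first xp samples, then an exponential recovery
    # 1 - exp(-alpha * (i - xp)) back toward amax, as in the driver loop.
    return [amax if i < xp else int(amax * (1 - math.exp(-(i - xp) * alpha)))
            for i in range(n)]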
ilmir-k/website-addons
website_sale_available_fake/controllers/__init__.py
Python
lgpl-3.0
66
0
# -*- cod
ing: utf-8 -*- from . import website_sale_available_fak
e
gillarkod/jira_extended
example.py
Python
mit
240
0
from
jira_extended import JIRA jira = JIRA( '<url>', basic_auth=( '<user>', '<password>', ), options={ 'extended_url': '<url>', } )
jira.search_issues('project = "PROJECT1"')[0].move('PROJECT2')
ipazc/oculus-crawl
test/crawler_tests.py
Python
gpl-3.0
172
0
import
unittest class CrawlTests(unittest.TestCase): def test_something(self): self.assertEqual(True, False) if __name__ == '__main__': unittest.main()
westernx/sgevents
sgevents/dispatcher/dispatcher.py
Python
bsd-3-clause
4,868
0.003903
import itertools import logging import os import threading import yaml from ..utils import get_adhoc_module from .callback import Callback from .context import Context from .qube import QubeCallback from .shell import ShellScript from .. import logs log = logging.getLogger('sgevents.dispatcher') # __name__ is ugly here class Dispatcher(object): def __init__(self): self.contexts = [] self.handlers = [] self._dispatch_counter = itertools.count(1) def load_plugins(self, dir_path): """Load plugins from ``*.yml`` and ``*.py`` files in given directory.""" for name in os.listdir(dir_path): # Our NFS makes these all over the place. if name.startswith('.'): continue base, ext = os.path.splitext(name) if ext == '.py': self._load_python_plugin(os.path.join(dir_path, name)) elif ext in ('.yml', '.yaml'): self._load_yaml_plugin(os.path.join(dir_path, name)) def _load_python_plugin(self, path): log.info('Loading Python plugin(s) from %s' % path) module = get_adhoc_module(path) # Look for something that wants to register itself. init_func = getattr(module, '__sgevents_init__', None) if init_func: init_func(self) return # Look for a dictionary of metadata structured as one would find # in the yaml file. desc = getattr(module, '__sgevents__', None) if desc is not None: self.register(desc) return raise ValueError('missing __sgevents_init__ function and __sgevents__ dict in Python plugin') def _load_yaml_plugin(self, path): log.info('Loading YAML plugin(s) from %s' % path) for data in yaml.load_all(open(path).read()): self.register(data) def register(self, desc): if isinstance(desc, (list, tuple)): for x in desc: self.register(x) return if not isinstance(desc, dict): raise TypeError('plugin descriptions are dicts; got %s' % type(desc)) kwargs = desc.copy() enabled = kwargs.pop('enabled', True) if not enabled: return type_ = kwargs.pop('type') registrar = getattr(self, 'register_%s' % type_, None) if registrar: registrar(**kwargs) else: raise ValueError('unknown plugin type %r' % type_) def register_callback(self, **kwargs): self.handlers.append(Callback(**kwargs)) def register_qube_callback(self, **kwargs): self.handlers.append(QubeCallback(**kwargs)) def register_shell_script(self, **kwargs): self.handlers.append(ShellScript(**kwargs)) def register_context(self, **kwargs): self.contexts.append(Context(**kwargs)) def get_extra_fields(self): """Get list of extra fields for ``EventLog`` for filter evaluation.""" res = [] for ctx in self.contexts: res.extend(ctx.get_extra_fields()) for cb in self.handlers: res.extend(cb.get_extra_fields()) return res def dispatch(self, event): """Dispatch the given event.""" envvars = {} for ctx in self.contexts: if ctx.filter is not None: try: if not ctx.filter.eval(event): continue except: log.exception('Error during context %r filter; skipping event' % ctx.name) return #log.info('Matched context %r; setting %s' % (ctx.name, ' '.join(sorted('%s=%s' % x for x in ctx.envvars.iteritems())))) envvars.update(ctx.envvars) for handler in self.handlers: if handler.filter is not None: try: if not handler.filter.eval(event): continue except: log.exception('Error during %s %r filter; skipping handler for event' % (handler.__class__.__name__.lower(), handler.name)) continue thread = threading.Thread(target=self._dispatch_thread_target, args=(logs.get_log_meta(), handler, envvars, event)) thread.start() # Not daemon! 
def _dispatch_thread_target(self, log_meta, handler, envvars, event): try: logs.update_log_meta(**log_meta) logs.update_log_meta(event=event.id, dispatch=next(self._dispatch_counter)) log.info('Dispatching to %s %r' % (handler.__class__.__name__.lower(), h
andler.name)) handler.handle_event(self, envvars, event) except: log.exception('Error during %s %r' % (handler.__class__.__name__.low
er(), handler.name))
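# Hypothetical plugin descriptor, matching register()'s dispatch on 'type';
# the handler kwargs ('name' here) are illustrative, not a documented schema.
dispatcher = Dispatcher()
dispatcher.register({
    'type': 'shell_script',  # routed to register_shell_script(**kwargs)
    'enabled': True,         # popped first; False makes register() a no-op
    'name': 'example-handler',
})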
trevor/calendarserver
txdav/base/datastore/util.py
Python
apache-2.0
4,470
0.004474
# -*- test-case-name: txdav.caldav.datastore.test.test_file -*- ## # Copyright (c) 2010-2014 Apple Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## """ Common utility functions for a datastores. """ from uuid import UUID from twext.python.log import Logger from twistedcaldav.memcacher import Memcacher log = Logger() _unset = object() class cached(object): """ This object is a decorator for a 0-argument method which should be called only once, and its result cached so that future invocations just return the same result without calling the underlying method again. @ivar thunk: the function to call to generate a cached value. """ def __init__(self, thunk): self.thunk = thunk def __get__(self, oself, owner): def inner(): cacheKey = "_" + self.thunk.__name__ + "_cached" cached = getattr(oself, cacheKey, _unset) if cached is _unset: value = self.thunk(oself) setattr(oself, cacheKey, value) return value else: return cached return inner class QueryCacher(Memcacher): """ A Memcacher for the object-with-name query (more to come) """ def __init__(self, cachePool="Default", cacheExpireSeconds=3600): super(QueryCacher, self).__init__(cachePool, pickle=True) sel
f.cacheExpireSeconds = cacheExpireSeconds def set(self, key, value): return
super(QueryCacher, self).set(key, value, expireTime=self.cacheExpireSeconds) def delete(self, key): return super(QueryCacher, self).delete(key) def setAfterCommit(self, transaction, key, value): transaction.postCommit(lambda: self.set(key, value)) def invalidateAfterCommit(self, transaction, key): # Invalidate now (so that operations within this transaction see it) # and *also* post-commit (because there could be a scheduled setAfterCommit # for this key) transaction.postCommit(lambda: self.delete(key)) return self.delete(key) # Home child objects by name def keyForObjectWithName(self, homeResourceID, name): return "objectWithName:%s:%s" % (homeResourceID, name) # Home child objects by id def keyForObjectWithResourceID(self, homeResourceID, resourceID): return "objectWithResourceID:%s:%s" % (homeResourceID, resourceID) # Home child objects by external id def keyForObjectWithExternalID(self, homeResourceID, externalID): return "objectWithExternalID:%s:%s" % (homeResourceID, externalID) # Home metadata (Created/Modified) def keyForHomeMetaData(self, homeResourceID): return "homeMetaData:%s" % (homeResourceID) # HomeChild metadata (Created/Modified (and SUPPORTED_COMPONENTS)) def keyForHomeChildMetaData(self, resourceID): return "homeChildMetaData:%s" % (resourceID) def normalizeUUIDOrNot(somestr): """ Take a string which may be: - the hex format of a UUID - a urn:uuid: URI containing a UUID - some other random thing and return, respectively: - the hex format of a UUID converted to upper case - a urn:uuid: URI with an upper-cased UUID (but not an upper-cased scheme and namespace) - some other random thing, unmodified @type somestr: L{str} @return: L{str} """ uuu = "urn:uuid:" isURI = somestr.startswith(uuu) if isURI: normstr = somestr[len(uuu):] else: normstr = somestr try: uu = UUID(normstr) except ValueError: if isURI: log.info(format="normalizing urn:uuid: without UUID: %(uid)r", uid=somestr) # not a UUID, whatever return somestr else: normalForm = str(uu).upper() if isURI: return uuu + normalForm else: return normalForm
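# Minimal illustration of the cached decorator's contract (class invented):
# the thunk runs once and the value is memoized on the instance attribute
# "_<name>_cached" that __get__ builds.
class _Example(object):
    def __init__(self):
        self.calls = 0

    @cached
    def expensive(self):
        self.calls += 1
        return 42

_e = _Example()
assert _e.expensive() == _e.expensive() == 42
assert _e.calls == 1  # second call read _expensive_cached instead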
git-commit/iot-gatekeeper
gatekeeper/test_app.py
Python
mit
25
0.04
import bot
bo
t.run_bot()
Jameeeees/Mag1C_baNd
kekangpai/testCon/migrations/0011_auto_20160716_1025.py
Python
apache-2.0
1,103
0
# -*- coding: utf-8 -*- # Generated by Django 1.9 on 2016-07-16 10:25 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('test
Con', '0010_auto_20160713_1534'), ] operations = [ migrations.AddField( model_name='account_teacher', name='teacherName', field=m
odels.CharField(default='TeacherName', max_length=20), preserve_default=False, ), migrations.AddField( model_name='classinfo', name='classCode', field=models.CharField(default=0, max_length=10), preserve_default=False, ), migrations.AddField( model_name='classinfo', name='username', field=models.CharField(default=0, max_length=20), preserve_default=False, ), migrations.AlterField( model_name='account_teacher', name='username', field=models.CharField(max_length=20, unique=True), ), ]
quattor/aquilon
lib/aquilon/worker/dbwrappers/search.py
Python
apache-2.0
3,147
0
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*- # ex: set expandtab softtabstop=4 shiftwidth=4: # # Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2017 Contributor # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Provides the logic used by all the search_next commands.""" import re from aquilon.utils import force_int int_re = re.compile(r'^(\d+)') def search_next(session, cls, attr, value, start, pack, locked=False, **filters): q = session.query(cls).filter(attr.startswith(value)) if filters: q = q.filter_by(**filters) # Doing the locking here is not the most efficient as we're potentially # locking a lot of rows - but if there's no better object to lock, then we # don't have much choice. if locked and q.count() == 0: # Nothing to lock -- so we'll crudely
pick out the first and # lock that. q2 = session.query(cls).order_by(attr).limit(1) if q2.count() == 1: attrval = q2.value(attr) # This is not particularly
pleasant: Oracle won't permit a # "FOR UPDATE" query where "ORDER BY" is given (ORA-02014); # constructing the allowable version of the query may not be # possible with SQLAlchemy. q2 = session.query(cls).filter(attr == attrval) session.execute(q2.with_for_update()) # Re-execute the original query: only 1 will get through here q = session.query(cls).filter(attr.startswith(value)) if filters: q = q.filter_by(**filters) # Else (q2.count == 0): the table is empty, so we'll just head # forwards and accept that this may break in that fairly rare # (one-off) case; something may also have raced and removed the # first row we picked. elif locked: # The FOR UPDATE query needs to be executed separately, otherwise it # won't see allocations done in a different session session.execute(q.with_for_update()) if start: start = force_int("start", start) else: start = 1 entries = set() for (attrvalue,) in q.values(attr): m = int_re.match(attrvalue[len(value):]) if m: n = int(m.group(1)) # Only remember entries that we care about... if n >= start: entries.add(n) if not entries: return start entries = sorted(entries) if pack: expecting = start for current in entries: if current > expecting: return expecting expecting += 1 return entries[-1] + 1
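# Pure-Python restatement of search_next's tail (the numeric scan), for
# intuition only; the SQL query and row locking above do the real work.
def _next_number(existing, start=1, pack=False):
    entries = sorted(n for n in set(existing) if n >= start)
    if not entries:
        return start
    if pack:
        expecting = start
        for current in entries:
            if current > expecting:
                return expecting  # first gap in the packed sequence
            expecting += 1
    return entries[-1] + 1

assert _next_number([1, 2, 5], pack=True) == 3   # fills the gap
assert _next_number([1, 2, 5], pack=False) == 6  # appends past the max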
montimaj/ShaktiT_LLVM
llvm/utils/lit/lit/formats/shtest.py
Python
gpl-3.0
1,993
0.003011
from __future__ import absolute_import import os import lit.Test import lit.TestRunner from .base import TestFormat class ShTest(TestFormat): """ShTest is a format with one file per test. This is the primary format for regression tests as described in the LLVM testing guide: http://llvm.org/docs/TestingGuide.html The ShTest files contain some number of shell-like command pipelines, along with assertions about what should be in the output. """ def __init__(self, execute_external = False): """Initializer. The 'execute_external' argument controls whether lit uses its internal logic for command pipelines, or passes the command to a shell subprocess. Args: execute_external: (optional) If true, use shell subprocesses instead of lit's internal pipeline logic. """ self.execute_external = execute_external def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig):
"""Yields test files matching 'suffixes' from the localConfig.""" source_path = testSuite.getSourcePath(path_in_suite) for filename in os.listdir(source_path): # Ignore dot files and excluded tests. if (filename.startswith('.') or filename in localConfig.excludes): continue filepath = os.path.join(source_path, filename) if not os.path.isdir(filepath): base,ext = os.path.splite
xt(filename) if ext in localConfig.suffixes: yield lit.Test.Test(testSuite, path_in_suite + (filename,), localConfig) def execute(self, test, litConfig): """Interprets and runs the given test file, and returns the result.""" return lit.TestRunner.executeShTest(test, litConfig, self.execute_external)
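# A typical lit.cfg fragment that drives this format; the suffix and exclude
# values are illustrative. 'config' is the object lit injects into config
# files, so this snippet only runs inside a lit configuration.
import lit.formats

config.name = 'Example'
config.test_format = lit.formats.ShTest(execute_external=True)
config.suffixes = ['.ll', '.c']
config.excludes = ['Inputs']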
jmuckian/GodsAndMonsters
bin/char.py
Python
gpl-3.0
46,928
0.003111
import random import sys import pygame import string import re import xml.dom.minidom from pygame.locals import * from gamedata import * from menu import Menu class CreateCharacter: """Creates a new character for Gods & Monsters based on the rules defined in the Rule Book beginning on page 6. """ def __init__(self): self.display = Display() self.gamedata = GameData() self.chardata = CharacterData().chardata def createcharacter(self, screen): """Initiates the creation of a new character.""" self.screen = screen # Set new character's level to 1 self.chardata["Level"] = 1 self.sheet = DisplayCharacter() self.generateabilites(screen) self.assignabilities(screen) self.selectspecies(screen) self.setspeciesabilities() self.selectgender(screen) self.selectarchetype(screen) self.selectmoralcode(screen) self.setexperience() self.setskillpoints() self.setsurvival() self.setweapons() self.setinitialgold() self.setsaves() self.setsurprise() self.setadvantage() self.setdefense() self.setattackbonus() self.setphysicaltraits() self.setmovement() self.setmojo() self.setname(screen) # self.chooseskills(screen) self.sheet.printcharactersheet(self.chardata, self.screen) while True: event = pygame.event.wait() if event.type == KEYDOWN: if event.key == K_q: exit() def generateabilites(self, screen): """Rolls six scores at 4d6, discarding the lowest die roll and checks to see that at least one is a 9 or higher. If none are at least 9, passes the scores on to give the player the option of rolling six more or changing lowest to 18. """ scores = [] # Generate six ability scores for i in range(6): scores.append(self.rollability()) # Checks to ensure at least one is 9 or higher. # Allows player to roll 6 more or assign 18 if not. if max(scores) < 9: scores = self.changeprime(scores, screen) # Attached modifiers to ability scores for later reference.
# se
lf.chardata[ability][-1] is set to original value to # account for temporary increases or decreases (curses, # magic, etc). i = 0 for score in scores: scores[i] = [score, self.gamedata.ABIL_MODIFIERS[score][0], self.gamedata.ABIL_MODIFIERS[score][1], self.gamedata.ABIL_MODIFIERS[score][2], score ] i += 1 # Assigns scores (temporarily) to abilities i = 0 for ability in self.gamedata.ABIL_NAMES: self.chardata[ability] = scores[i] i += 1 def rollability(self): """Rolls one score at 4d6, discarding lowest and passing it back to calling function. """ roll = [] for i in range(4): roll.append(random.randint(1, 6)) return sum(roll) - min(roll) def changeprime(self, scores, screen): """If no abilities are at least 9, gives the player the option to roll six more scores, taking the highest of the twelve, or to raise the lowest of the six scores to 18. Then passes them back to the calling function. --Page 11-- """ prompt = ["Your character's ability scores are", "too low for an archetype selection.", "You may roll six more and take the", "highest of all twelve rolls, or", "increase your lowest score to 18." ] choices = ["Roll", "Increase"] bg = pygame.image.load(self.display.BG_FULL).convert() screen.blit(bg, (0, 0)) element = "ABILITY SCORES:" value = str(scores[0]) + ", " + str(scores[1]) + ", " + \ str(scores[2]) + ", " + str(scores[3]) + ", " + \ str(scores[4]) + ", " + str(scores[5]) row = 14 col = 2 text = self.display.FONT.render(element, True, self.display.WHITE) screen.blit(text, (col * self.display.CH_SPACE, row * self.display.CH_SPACE)) text = self.display.FONT.render(value, True, self.display.BRIGHT_GREEN) screen.blit(text, ((col + 16) * self.display.CH_SPACE, row * self.display.CH_SPACE)) row += 3 for line in prompt: text = self.display.FONT.render(line.upper(), True, self.display.BRIGHT_GREEN) screen.blit(text, (col * self.display.CH_SPACE, row * self.display.CH_SPACE)) row += 1 row = 24 col = 0 for item in choices: ch = self.display.FONT.render(item[0].upper(), True, self.display.WHITE) screen.blit(ch, (col * self.display.CH_SPACE, row * self.display.CH_SPACE)) text = self.display.FONT.render(item[1:].upper(), True, self.display.BRIGHT_GREEN) screen.blit(text, ((col + 1) * self.display.CH_SPACE, row * self.display.CH_SPACE)) col += len(item) + 1 pygame.display.update() while True: event = pygame.event.wait() if event.type == KEYDOWN: if event.key == K_r: for i in range(6): scores.append(self.rollability()) for i in range(6): scores.remove(min(scores)) return scores elif event.key == K_i: lowest = scores.index(min(scores)) scores[lowest] = 18 return scores def assignabilities(self, screen): """Initiates assignment of ability scores and swaps scores a player request. """ while True: prompta = ["You may customize your character's", "abilities.", "", "Select the first ability to swap, or", "'f' to finish." ] promptb = ["You may customize your character's", "abilities.", "", "Select the second ability to swap,", "or 'f' to finish." 
] while True: bg = pygame.image.load(self.display.BG_FULL).convert() screen.blit(bg, (0, 0)) self.sheet.selectabilities(self.chardata, self.screen) row = 24 col = 0 ch = self.display.FONT.render("F", True, self.display.WHITE) screen.blit(ch, (col * self.display.CH_SPACE, row * self.display.CH_SPACE)) text = self.display.FONT.render("INISH", True, self.display.BRIGHT_GREEN) screen.blit(text, ((col + 1) * self.display.CH_SPACE, row * self.display.CH_SPACE)) row = 17 col = 2 for line in prompta: text = self.display.FONT.render(line.upper(), True, self.display.BRIGHT_GREEN) screen.blit(text, (col * self.display.CH_SPACE, row * self.display.CH_
chaocodes/playlist-manager-django
manager/playlist/views.py
Python
mit
2,983
0.002011
from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User from django.http import Http404, JsonResponse, HttpResponseForbidden from django.shortcuts import render, redirect, get_object_or_404 from .forms import PlaylistForm from .models import Playlist def form_data(user, form): data = { 'owner': user, 'playlist_form': form, } return data def playlists(request, user_id): user = get_object_or_404(User, id=user_id) if request.method == 'GET': data = { 'owner': user, 'playlists': Playlist.objects.filter(user=user), } return render(request, 'playlist/index.html', data) elif request.method == 'POST': # Check if user matches URL if request.user != user: return HttpResponseForbidden() form = PlaylistForm(request.POST) if form.is_valid(): playlist = form.save(commit=False) playlist.user = request.user playlist.save() return redirect('playlist:all', user_id) else: data = form_data(user, form) return render(request, 'playlist/form.html', data) @login_required def create_view(request, user_id): user = get_object_or_404(User, id=user_id) # Check if user matches URL if request.user != user: return redirect('playlist:create', request.user.id) data = form_data(user, PlaylistForm()) return render(request, 'playlist/form.html', data) def playlist(request, user_id, playlist_id): user = get_object_or_404(User, id=user_id) playlist = get_object_or_404(Playlist, id=playlist_id, user=user) if request.method == 'GET': data = { 'owner': user, 'playlist': playlist, } return render(request, 'playlist/playlist.html', data) elif request.method == 'POST': # Check if user owns playlist if request.user != playlist.user: return HttpResponseForbidden() action = request.GET.get('a
ction', False) if action: if action == 'delete': playlist.delete() elif action == 'update': form = PlaylistForm(request.POST, instance=playlist) if form.is_valid(): playlist = form.save() else: data = form_data(user, form) return render(request, 'p
laylist/form.html', data) return redirect('playlist:all', user_id) @login_required def edit_view(request, user_id, playlist_id): user = get_object_or_404(User, id=user_id) playlist = get_object_or_404(Playlist, id=playlist_id, user=user) # Check if playlist belongs to logged in user if request.user != playlist.user: return redirect('playlist:all', playlist.user.id) data = form_data(user, PlaylistForm(instance=playlist)) return render(request, 'playlist/form.html', data)
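# Hypothetical urls.py for these views; only the route names 'all' and
# 'create' are evidenced by the redirect() calls above — the path strings
# and the other two names are guesses.
from django.conf.urls import url

from . import views

app_name = 'playlist'
urlpatterns = [
    url(r'^users/(?P<user_id>\d+)/playlists/$', views.playlists, name='all'),
    url(r'^users/(?P<user_id>\d+)/playlists/create/$', views.create_view, name='create'),
    url(r'^users/(?P<user_id>\d+)/playlists/(?P<playlist_id>\d+)/$', views.playlist, name='detail'),
    url(r'^users/(?P<user_id>\d+)/playlists/(?P<playlist_id>\d+)/edit/$', views.edit_view, name='edit'),
]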
si618/pi-time
pi_time/pi_time/db.py
Python
gpl-3.0
39
0.025641
"
""Database access
code...twistar?"""
Stratoscale/pycommonlog
js/main.py
Python
apache-2.0
1,322
0.005295
import shutil
import argparse
import os
import subprocess
import SimpleHTTPServer
import SocketServer
import socket

parser = argparse.ArgumentParser(
    description='Present a summary of test results in a webpage.'
)
parser.add_argument("--root", default="logs.racktest")
parser.add_argument("--whiteboxRoot", action="store_true")
parser.add_argument("--noB
rowser", help="avoid openning the browser", action="store_true") parser.add_argument("--noServer", action="store_true") args = parser.parse_args() if args.whiteboxRoot: args.root = "logs.whiteboxtest" root = args.root reporterDir = os.path.join(root, "_reporter") originalReporterDir = "../pycommonlog/js" shutil.rmtree(reporterDir, ignore_errors=True) shutil.copytree(originalReporterDir,
reporterDir) class ReusingTCPServer(SocketServer.TCPServer): def server_bind(self): self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.socket.bind(self.server_address) if not args.noServer: os.chdir(args.root) PORT = 8000 Handler = SimpleHTTPServer.SimpleHTTPRequestHandler httpd = ReusingTCPServer(("127.0.0.1", PORT), Handler) BROWSER_CMDLINE = ["google-chrome", "http://127.0.0.1:8000/_reporter/index.html"] if not args.noBrowser: subprocess.Popen(BROWSER_CMDLINE) httpd.serve_forever()
googleads/google-ads-python
google/ads/googleads/v9/errors/types/shared_set_error.py
Python
apache-2.0
1,240
0.000806
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Versi
on 2.0 (the "License"); # y
ou may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore __protobuf__ = proto.module( package="google.ads.googleads.v9.errors", marshal="google.ads.googleads.v9", manifest={"SharedSetErrorEnum",}, ) class SharedSetErrorEnum(proto.Message): r"""Container for enum describing possible shared set errors. """ class SharedSetError(proto.Enum): r"""Enum describing possible shared set errors.""" UNSPECIFIED = 0 UNKNOWN = 1 CUSTOMER_CANNOT_CREATE_SHARED_SET_OF_THIS_TYPE = 2 DUPLICATE_NAME = 3 SHARED_SET_REMOVED = 4 SHARED_SET_IN_USE = 5 __all__ = tuple(sorted(__protobuf__.manifest))
jrbl/invenio
modules/bibauthorid/lib/bibauthorid_prob_matrix.py
Python
gpl-2.0
4,586
0.004143
# -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2011, 2012 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. import bibauthorid_config as bconfig from bibauthorid_comparison import compare_bibrefrecs from bibauthorid_comparison import clear_all_caches as clear_comparison_caches from bibauthorid_backinterface import bib_matrix from bibauthorid_backinterface import get_sql_time from bibauthorid_backinterface import filter_modified_record_ids from bibauthorid_general_utils import update_status \ , update_status_final if bconfig.DEBUG_CHECKS: def _debug_is_eq(v1, v2): eps = 1e-2 return v1 + eps > v2 and v2 + eps > v1 def _debug_is_eq_v(vl1, vl2): if isinstance(vl1, str) and isinstance(vl2, str): return vl1 == vl2 if isinstance(vl1, tuple) and isinstance(vl2, tuple): return _debug_is_eq(vl1[0], vl2[0]) and _debug_is_eq(vl1[1], vl2[1]) return False class probability_matrix: ''' This class contains and maintains the comparison between all virtual authors. It is able to write and read from the database and update the results. ''' def __init__(self, cluster_set, use_cache=False, save_cache=False): ''' Constructs probability matrix. If use_cache is true, it will try to load old computations from the database. If save cache is true it will save the current results into the database. @param cluster_set: A cluster set object, used to initialize the matrix. ''' def check_for_cleaning(cur_calc): if cur_calc % 10000000 == 0: clear_comparison_caches() self._bib_matrix = bib_matrix(cluster_set) old_matrix = bib_matrix() ncl = sum(len(cl.bibs) for cl in cluster_set.clusters) expected = ((ncl * (ncl - 1)) / 2) if expected == 0: expected = 1 if use_cache and old_matrix.load(cluster_set.last_name): cached_bibs = set(filter_modified_record_ids( old_matrix.get_keys(), old_matrix.creation_time)) else: cached_bibs = set() if save_cache: creation_time = get_sql_time() cur_calc, opti = 0, 0 for cl1 in cluster_set.clusters: update_status((float(opti) + cur_calc) / expected, "Prob matrix: calc %d, opti %d." % (cur_calc, opti)) for cl2 in cluster_set.clusters: if id(cl1) < id(cl2) and not cl1.hates(c
l2): for bib1 in cl1.bibs: for bib2 in cl2.bibs: if bib1 in cached_bibs and bib2 in cached_bibs: val = old_matrix[bib1, bib2] if not val:
cur_calc += 1 check_for_cleaning(cur_calc) val = compare_bibrefrecs(bib1, bib2) else: opti += 1 if bconfig.DEBUG_CHECKS: assert _debug_is_eq_v(val, compare_bibrefrecs(bib1, bib2)) else: cur_calc += 1 check_for_cleaning(cur_calc) val = compare_bibrefrecs(bib1, bib2) self._bib_matrix[bib1, bib2] = val clear_comparison_caches() if save_cache: update_status(1., "saving...") self._bib_matrix.store(cluster_set.last_name, creation_time) update_status_final("Matrix done. %d calc, %d opt." % (cur_calc, opti)) def __getitem__(self, bibs): return self._bib_matrix[bibs[0], bibs[1]]
mailjet/mailjet-apiv3-python
mailjet_rest/__init__.py
Python
mit
189
0
#!/us
r/bin/env python # coding=utf-8 from mailjet_rest.client
import Client
from mailjet_rest.utils.version import get_version

__version__ = get_version()
# __all__ entries must be strings naming the public API, not the objects
__all__ = ('Client', 'get_version')
QANSEE/l10n-belgium
account_companyweb/wizard/__init__.py
Python
agpl-3.0
1,052
0
# -*- coding: utf-8 -*- # ############################################################################## # # Authors: Adrien Peiffer # Copyright (c) 2014 Acsone
SA/NV (http://www.acsone.eu) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or F
ITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from . import account_companyweb_report_wizard from . import partner_update_companyweb