| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 distinct value) | license (string, 15 distinct values) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
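The columns describe a fill-in-the-middle (FIM) split: each row stores one source file cut into a prefix, a held-out middle, and a suffix, alongside repository metadata and a quality score. A minimal sketch of stitching a row back into the original file, assuming a row is available as a plain dict keyed by the column names above (the loading mechanism itself is not specified here):

```python
# Sketch only: assumes each row is a dict keyed by the column names above.
def reconstruct_source(row):
    """Concatenate the FIM pieces back into the original file text."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Hypothetical row mirroring the first entry below (content abridged).
row = {
    "repo_name": "rgs1/zk_shell",
    "path": "zk_shell/tests/test_watcher.py",
    "prefix": "class WatcherTestC",
    "middle": "ase(ShellTestCase):\n",
    "suffix": '    """ test watcher """\n',
}
print(reconstruct_source(row))
```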
rgs1/zk_shell | zk_shell/tests/test_watcher.py | Python | apache-2.0 | 633 | 0 |
# -*- coding: utf-8 -*-
""" watcher test cases """
from .shell_test_case import ShellTestCase
from zk_shell.watcher import ChildWatcher
class WatcherTestCase(ShellTestCase):
""" test watcher """
def test_add_update(self):
watcher = ChildWatcher(self.client, print_func=self.shell.show_output)
path = "%s/watch" % self.tests_path
self.shell.onecmd("create %s ''" % path)
watcher.add(path, True)
# update() calls remove() as well, if the path exists.
watcher.update(path)
expected = "\n/tests/watch:\n\n"
self.assertEquals(expected, self.output.getvalue())
|
citiufpe/citi-webplate | project_name/settings/test.py | Python | mit | 360 | 0 |
from .base import *
DEBUG = True
db_url = 'sqlite:///{}'.format(os.path.join(BASE_DIR, 'test.sqlite3'))
default_db = dj_database_url.config(default=db_url, conn_max_age=None)
DATABASES['default'].update(default_db)
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
yprez/django-useful | test_project/test_project_py2/settings.py | Python | isc | 1,842 | 0.001086 |
# Django settings for test_project project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db',
}
}
TIME_ZONE = 'Etc/UTC'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
STATIC_URL = '/static/'
SECRET_KEY = 't^4dt#fkxftpborp@%lg*#h2wj%vizl)#pkkt$&0f7b87rbu6y'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django.contrib.admin',
'djcelery',
'django_nose',
'useful', # Import the app to run tests
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
TEMPLATE_CONTEXT_PROCESSORS = (
'useful.context_processors.settings',
)
BROKER_BACKEND = 'memory'
CELERY_ALWAYS_EAGER = True
from datetime import timedelta
CELERYBEAT_SCHEDULE = {
'cleanup': {
'task': 'useful.tasks.call_management_command',
'schedule': timedelta(seconds=10),
'args': ('validate', ),
},
}
|
ygol/odoo | addons/stock/tests/test_product.py | Python | agpl-3.0 | 4,949 | 0.001819 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Author: Leonardo Pistone
# Copyright 2015 Camptocamp SA
from odoo.addons.stock.tests.common2 import TestStockCommon
from odoo.tests.common import Form
class TestVirtualAvailable(TestStockCommon):
def setUp(self):
super(TestVirtualAvailable, self).setUp()
# Make `product3` a storable product for this test. Indeed, creating quants
# and playing with owners is not possible for consumables.
self.product_3.type = 'product'
self.env['stock.quant'].create({
'product_id': self.product_3.id,
'location_id': self.env.ref('stock.stock_location_stock').id,
'quantity': 30.0})
self.env['stock.quant'].create({
'product_id': self.product_3.id,
'location_id': self.env.ref('stock.stock_location_stock').id,
'quantity': 10.0,
'owner_id': self.user_stock_user.partner_id.id})
self.picking_out = self.env['stock.picking'].create({
'picking_type_id': self.ref('stock.picking_type_out'),
'location_id': self.env.ref('stock.stock_location_stock').id,
'location_dest_id': self.env.ref('stock.stock_location_customers').id})
self.env['stock.move'].create({
'name': 'a move',
'product_id': self.product_3.id,
'product_uom_qty': 3.0,
'product_uom': self.product_3.uom_id.id,
'picking_id': self.picking_out.id,
'location_id': self.env.ref('stock.stock_location_stock').id,
'location_dest_id': self.env.ref('stock.stock_location_customers').id})
self.picking_out_2 = self.env['stock.picking'].create({
'picking_type_id': self.ref('stock.picking_type_out'),
'location_id': self.env.ref('stock.stock_location_stock').id,
'location_dest_id': self.env.ref('stock.stock_location_customers').id})
self.env['stock.move'].create({
'restrict_partner_id': self.user_stock_user.partner_id.id,
'name': 'another move',
'product_id': self.product_3.id,
'product_uom_qty': 5.0,
'product_uom': self.product_3.uom_id.id,
'picking_id': self.picking_out_2.id,
'location_id': self.env.ref('stock.stock_location_stock').id,
'location_dest_id': self.env.ref('stock.stock_location_customers').id})
def test_without_owner(self):
self.assertAlmostEqual(40.0, self.product_3.virtual_available)
self.picking_out.action_assign()
self.picking_out_2.action_assign()
self.assertAlmostEqual(32.0, self.product_3.virtual_available)
def test_with_owner(self):
prod_context = self.product_3.with_context(owner_id=self.user_stock_user.partner_id.id)
self.assertAlmostEqual(10.0, prod_context.virtual_available)
self.picking_out.action_assign()
self.picking_out_2.action_assign()
self.assertAlmostEqual(5.0, prod_context.virtual_available)
def test_free_quantity(self):
""" Test the value of product.free_qty. Free_qty = qty_on_hand - qty_reserved"""
self.assertAlmostEqual(40.0, self.product_3.free_qty)
self.picking_out.action_confirm()
self.picking_out_2.action_confirm()
# No reservation so free_qty is unchanged
self.assertAlmostEqual(40.0, self.product_3.free_qty)
self.picking_out.action_assign()
self.picking_out_2.action_assign()
# 8 units are now reserved
self.assertAlmostEqual(32.0, self.product_3.free_qty)
self.picking_out.do_unreserve()
self.picking_out_2.do_unreserve()
# 8 units are available again
self.assertAlmostEqual(40.0, self.product_3.free_qty)
def test_archive_product_1(self):
"""`qty_available` and `virtual_available` are computed on archived products"""
self.assertTrue(self.product_3.active)
self.assertAlmostEqual(40.0, self.product_3.qty_available)
self.assertAlmostEqual(40.0, self.product_3.virtual_available)
self.product_3.active = False
self.assertAlmostEqual(40.0, self.product_3.qty_available)
self.assertAlmostEqual(40.0, self.product_3.virtual_available)
def test_archive_product_2(self):
"""Archiving a product should archive its reordering rules"""
self.assertTrue(self.product_3.active)
orderpoint_form = Form(self.env['stock.warehouse.orderpoint'])
orderpoint_form.product_id = self.product_3
orderpoint_form.location_id = self.env.ref('stock.stock_location_stock')
orderpoint_form.product_min_qty = 0.0
orderpoint_form.product_max_qty = 5.0
orderpoint = orderpoint_form.save()
self.assertTrue(orderpoint.active)
self.product_3.active = False
self.assertFalse(orderpoint.active)
|
abhikeshav/ydk-py | core/samples/bgp_netconf.py | Python | apache-2.0 | 4,063 | 0.002461 |
#!/usr/bin/env python
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
# ----------------------------------------------------------------
# bgp.py Sample program illustrating use of generated api
# ydk.models.bgp.bgp.py which inturn is derived from the
# open-config bgp yang module.
#
from ydk.types import Empty
from ydk.providers import NetconfServiceProvider, CodecServiceProvider
from ydk.services import CRUDService, NetconfService, CodecService, Datastore
from ydk.models.openconfig import bgp
from ydk.models.openconfig.routing_policy import RoutingPolicy
from _config_builder import _get_bgp_config, _get_routing_cfg, _get_bgp_routing_multiple_object
def bgp_run(netconf_service, session):
# set up routing policy definition
routing_policy = _get_routing_cfg()
netconf_service.edit_config(session, Datastore.candidate, routing_policy)
bgp_cfg = _get_bgp_config()
# IPv4 Neighbor instance config done
netconf_service.edit_config(session, Datastore.candidate, bgp_cfg)
bgp_cfg_read = netconf_service.get_config(session, Datastore.candidate, bgp.Bgp())
print bgp_cfg_read
# IPv6 Neighbor instance config
nbr_ipv6 = bgp.Bgp.Neighbors.Neighbor()
nbr_ipv6.parent = bgp_cfg.neighbors
nbr_ipv6.neighbor_address = '2001:db8:fff1::1'
nbr_ipv6.config.neighbor_address = '2001:db8:fff1::1'
nbr_ipv6.config.peer_as = 65002
nbr_ipv6_afsf = nbr_ipv6.afi_safis.AfiSafi()
nbr_ipv6_afsf.afi_safi_name = 'ipv6-unicast'
nbr_ipv6_afsf.config.peer_as = 65002
nbr_ipv6_afsf.config.afi_safi_name = 'ipv6-unicast'
nbr_ipv6_afsf.config.enabled = True
nbr_ipv6.afi_safis.afi_safi.append(nbr_ipv6_afsf)
netconf_service.edit_config(session, Datastore.candidate, bgp_cfg)
nbr_ipv6_filter = bgp.Bgp.Neighbors.Neighbor()
nbr_ipv6_filter.neighbor_address = '2001:db8:fff1::1'
nbr_ipv6_read = netconf_service.get_config(session, Datastore.candidate, bgp_cfg)
print nbr_ipv6_read
def run_multiple_routing_bgp(netconf_service, session):
crud = CRUDService()
codec = CodecService()
codec_provider = CodecServiceProvider()
crud.delete(session, bgp())
crud.delete(session, RoutingPolicy())
multi_cfg = _get_bgp_routing_multiple_object()
multi_payload_expected = codec.encode(codec_provider, multi_cfg)
result = netconf_service.edit_config(session, Datastore.candidate, multi_cfg)
assert 'ok' in result
multi_filter = {'bgp':bgp(), 'routing-policy':RoutingPolicy()}
multi_entity_read = netconf_service.get_config(session, Datastore.candidate, multi_filter)
multi_payload_actual = codec.encode(codec_provider, multi_entity_read)
assert multi_payload_expected == multi_payload_actual
def init_logging():
import logging
logger = logging.getLogger("ydk")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
if __name__ == "__main__":
init_logging()
provider = NetconfServiceProvider(address='127.0.0.1', username='admin', password='admin', protocol='ssh', port=12022)
netconf_service = NetconfService()
bgp_run(netconf_service, provider)
# run_multiple_routing_bgp(netconf_service, provider)
exit()
|
biocyberman/bcbio-nextgen | bcbio/ngsalign/novoalign.py | Python | mit | 6,281 | 0.003184 |
"""Next-gen sequencing alignment with Novoalign: http://www.novocraft.com
For BAM input handling this requires:
novoalign (with license for multicore)
samtools
"""
import os
import subprocess
from bcbio import bam, utils
from bcbio.ngsalign import alignprep, postalign
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.distributed.transaction import tx_tmpdir
from bcbio.utils import (memoize_outfile, file_exists)
# ## BAM realignment
def get_rg_info(names):
out = r"@RG\tID:{rg}\tPL:{pl}\tPU:{pu}\tSM:{sample}".format(**names)
if names.get("lb"):
out += r"\tLB:{lb}".format(**names)
return out
def align_bam(in_bam, ref_file, names, align_dir, data):
"""Perform realignment of input BAM file; uses unix pipes for avoid IO.
"""
config = data["config"]
out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
novoalign = config_utils.get_program("novoalign", config)
samtools = config_utils.get_program("samtools", config)
resources = config_utils.get_resources("novoalign", config)
num_cores = config["algorithm"].get("num_cores", 1)
max_mem = resources.get("memory", "4G").upper()
extra_novo_args = " ".join(_novoalign_args_from_config(config, False))
if not file_exists(out_file):
with tx_tmpdir(data, base_dir=align_dir) as work_dir:
with postalign.tobam_cl(data, out_file, bam.is_paired(in_bam)) as (tobam_cl, tx_out_file):
rg_info = get_rg_info(names)
tx_out_prefix = os.path.splitext(tx_out_file)[0]
prefix1 = "%s-in1" % tx_out_prefix
cmd = ("unset JAVA_HOME && "
"{samtools} sort -n -o -l 1 -@ {num_cores} -m {max_mem} {in_bam} {prefix1} "
"| {novoalign} -o SAM '{rg_info}' -d {ref_file} -f /dev/stdin "
" -F BAMPE -c {num_cores} {extra_novo_args} | ")
cmd = (cmd + tobam_cl).format(**locals())
do.run(cmd, "Novoalign: %s" % names["sample"], None,
[do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, in_bam)])
return out_file
# ## Fastq to BAM alignment
def align_pipe(fastq_file, pair_file, ref_file, names, align_dir, data):
"""Perform piped alignment of fastq input files, generating sorted output BAM.
"""
pair_file = pair_file if pair_file else ""
# back compatible -- older files were named with lane information, use sample name now
out_file = os.path.join(align_dir, "{0}-sort.bam".format(names["lane"]))
if not utils.file_exists(out_file):
out_file = os.path.join(align_dir, "{0}-sort.bam".format(dd.get_sample_name(data)))
if data.get("align_split") or fastq_file.endswith(".sdf"):
final_file = out_file
out_file, data = alignprep.setup_combine(final_file, data)
fastq_file, pair_file = alignprep.split_namedpipe_cls(fastq_file, pair_file, data)
else:
final_file = None
samtools = config_utils.get_program("samtools", data["config"])
novoalign = config_utils.get_program("novoalign", data["config"])
resources = config_utils.get_resources("novoalign", data["config"])
num_cores = data["config"]["algorithm"].get("num_cores", 1)
max_mem = resources.get("memory", "1G")
extra_novo_args = " ".join(_novoalign_args_from_config(data["config"]))
rg_info = get_rg_info(names)
if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)):
with tx_tmpdir(data) as work_dir:
with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
tx_out_prefix = os.path.splitext(tx_out_file)[0]
cmd = ("unset JAVA_HOME && "
"{novoalign} -o SAM '{rg_info}' -d {ref_file} -f {fastq_file} {pair_file} "
" -c {num_cores} {extra_novo_args} | ")
cmd = (cmd + tobam_cl).format(**locals())
do.run(cmd, "Novoalign: %s" % names["sample"], None,
[do.file_nonempty(tx_out_file), do.file_reasonable_size(tx_out_file, fastq_file)])
data["work_bam"] = out_file
return data
def _novoalign_args_from_config(config, need_quality=True):
"""Select novoalign options based on configuration parameters.
"""
if need_quality:
qual_format = config["algorithm"].get("quality_format", "").lower()
qual_flags = ["-F", "ILMFQ" if qual_format == "illumina" else "STDFQ"]
else:
qual_flags = []
multi_mappers = config["algorithm"].get("multiple_mappers")
if multi_mappers is True:
multi_flag = "Random"
elif isinstance(multi_mappers, basestring):
multi_flag = multi_mappers
else:
multi_flag = "None"
multi_flags = ["-r"] + multi_flag.split()
resources = config_utils.get_resources("novoalign", config)
# default arguments for improved variant calling based on
# comparisons to reference materials: turn off soft clipping and recalibrate
if resources.get("options") is None:
extra_args = ["-o", "FullNW", "-k"]
else:
extra_args = [str(x) for x in resources.get("options", [])]
return qual_flags + multi_flags + extra_args
# Tweaks to add
# -k -t 200 -K quality calibration metrics
# paired end sizes
# ## Indexing
@memoize_outfile(ext=".ndx")
def refindex(ref_file, kmer_size=None, step_size=None, out_file=None):
cl = ["novoindex"]
if kmer_size:
cl += ["-k", str(kmer_size)]
if step_size:
cl += ["-s", str(step_size)]
cl += [out_file, ref_file]
subprocess.check_call(cl)
# ## Galaxy integration
# Optional galaxy location file. Falls back on remap_index_fn if not found
galaxy_location_file = "novoalign_indices.loc"
def remap_index_fn(ref_file):
"""Map sequence references to equivalent novoalign indexes.
"""
checks = [os.path.splitext(ref_file)[0].replace("/seq/", "/novoalign/"),
os.path.splitext(ref_file)[0] + ".ndx",
ref_file + ".bs.ndx",
ref_file + ".ndx"]
for check in checks:
if os.path.exists(check):
return check
return checks[0]
|
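The `get_rg_info` helper in the novoalign row above builds a SAM read-group header string from a metadata dict. A quick standalone check of its output; the function body is copied from the row, while the sample values are hypothetical:

```python
def get_rg_info(names):
    # Copied from the novoalign.py row above; builds an @RG line for the aligner.
    out = r"@RG\tID:{rg}\tPL:{pl}\tPU:{pu}\tSM:{sample}".format(**names)
    if names.get("lb"):
        out += r"\tLB:{lb}".format(**names)
    return out

# Hypothetical read-group metadata, purely for illustration.
names = {"rg": "lane1", "pl": "illumina", "pu": "unit1", "sample": "sampleA", "lb": "libA"}
print(get_rg_info(names))
# @RG\tID:lane1\tPL:illumina\tPU:unit1\tSM:sampleA\tLB:libA
```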
bruecksen/isimip | isi_mip/pages/migrations/0013_formpage_button_name.py | Python | mit | 500 | 0.002 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-25 15:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pages', '0012_auto_20160519_1740'),
]
operations = [
migrations.AddField(
model_name='formpage',
name='button_name',
field=models.CharField(default='Submit', max_length=500, verbose_name='Button name'),
),
]
|
oxagast/hashnet | stratum-mining-proxy/mining_libs/stratum_listener.py | Python | gpl-2.0 | 7,466 | 0.008438 |
import time
import binascii
import struct
from twisted.internet import defer
from stratum.services import GenericService
from stratum.pubsub import Pubsub, Subscription
from stratum.custom_exceptions import ServiceException, RemoteServiceException
from jobs import JobRegistry
import stratum.logger
log = stratum.logger.get_logger('proxy')
def var_int(i):
if i <= 0xff:
return struct.pack('>B', i)
elif i <= 0xffff:
return struct.pack('>H', i)
raise Exception("number is too big")
class UpstreamServiceException(ServiceException):
code = -2
class SubmitException(ServiceException):
code = -2
class DifficultySubscription(Subscription):
event = 'mining.set_difficulty'
difficulty = 1
@classmethod
def on_new_difficulty(cls, new_difficulty):
cls.difficulty = new_difficulty
cls.emit(new_difficulty)
def after_subscribe(self, *args):
self.emit_single(self.difficulty)
class MiningSubscription(Subscription):
'''This subscription object implements
logic for broadcasting new jobs to the clients.'''
event = 'mining.notify'
last_broadcast = None
@classmethod
def disconnect_all(cls):
for subs in Pubsub.iterate_subscribers(cls.event):
if subs.connection_ref().transport != None:
subs.connection_ref().transport.loseConnection()
@classmethod
def on_template(cls, job_id, prevhash, coinb1, coinb2, merkle_branch, version, nbits, ntime, clean_jobs):
'''Push new job to subscribed clients'''
cls.last_broadcast = (job_id, prevhash, coinb1, coinb2, merkle_branch, version, nbits, ntime, clean_jobs)
cls.emit(job_id, prevhash, coinb1, coinb2, merkle_branch, version, nbits, ntime, clean_jobs)
def _finish_after_subscribe(self, result):
'''Send new job to newly subscribed client'''
try:
(job_id, prevhash, coinb1, coinb2, merkle_branch, version, nbits, ntime, _) = self.last_broadcast
except Exception:
log.error("Template not ready yet")
return result
self.emit_single(job_id, prevhash, coinb1, coinb2, merkle_branch, version, nbits, ntime, True)
return result
def after_subscribe(self, *args):
'''This will send new job to the client *after* he receive subscription details.
on_finish callback solve the issue that job is broadcasted *during*
the subscription request and client receive messages in wrong order.'''
self.connection_ref().on_finish.addCallback(self._finish_after_subscribe)
class StratumProxyService(GenericService):
service_type = 'mining'
service_vendor = 'mining_proxy'
is_default = True
_f = None # Factory of upstream Stratum connection
custom_user = None
custom_password = None
extranonce1 = None
extranonce2_size = None
tail_iterator = 0
registered_tails= []
@classmethod
def _set_upstream_factory(cls, f):
cls._f = f
@classmethod
def _set_custom_user(cls, custom_user, custom_password):
cls.custom_user = custom_user
cls.custom_password = custom_password
@classmethod
def _set_extranonce(cls, extranonce1, extranonce2_size):
cls.extranonce1 = extranonce1
cls.extranonce2_size = extranonce2_size
@classmethod
def _get_unused_tail(cls):
'''Currently adds up to two bytes to extranonce1,
limiting proxy for up to 65535 connected clients.'''
for _ in range(0, 0xffff): # 0-65535
cls.tail_iterator += 1
cls.tail_iterator %= 0xffff
# Zero extranonce is reserved for getwork connections
if cls.tail_iterator == 0:
cls.tail_iterator += 1
# var_int throws an exception when input is >= 0xffff
tail = var_int(cls.tail_iterator)
tail_len = len(tail)
if tail not in cls.registered_tails:
cls.registered_tails.append(tail)
return (binascii.hexlify(tail), cls.extranonce2_size - tail_len)
raise Exception("Extranonce slots are full, please disconnect some miners!")
def _drop_tail(self, result, tail):
tail = binascii.unhexlify(tail)
if tail in self.registered_tails:
self.registered_tails.remove(tail)
else:
log.error("Given extranonce is not registered1")
return result
@defer.inlineCallbacks
def authorize(self, worker_name, worker_password, *args):
if self._f.client == None or not self._f.client.connected:
yield self._f.on_connect
if self.custom_user != None:
# Already subscribed by main()
defer.returnValue(True)
result = (yield self._f.rpc('mining.authorize', [worker_name, worker_password]))
defer.returnValue(result)
@defer.inlineCallbacks
def subscribe(self, *args):
if self._f.client == None or not self._f.client.connected:
yield self._f.on_connect
if self._f.client == None or not self._f.client.connected:
raise UpstreamServiceException("Upstream not connected")
if self.extranonce1 == None:
# This should never happen, because _f.on_connect is fired *after*
# connection receive mining.subscribe response
raise UpstreamServiceException("Not subscribed on upstream yet")
(tail, extranonce2_size) = self._get_unused_tail()
session = self.connection_ref().get_session()
session['tail'] = tail
# Remove extranonce from registry when client disconnect
self.connection_ref().on_disconnect.addCallback(self._drop_tail, tail)
subs1 = Pubsub.subscribe(self.connection_ref(), DifficultySubscription())[0]
subs2 = Pubsub.subscribe(self.connection_ref(), MiningSubscription())[0]
defer.returnValue(((subs1, subs2),) + (self.extranonce1+tail, extranonce2_size))
@defer.inlineCallbacks
def submit(self, worker_name, job_id, extranonce2, ntime, nonce, *args):
if self._f.client == None or not self._f.client.connected:
raise SubmitException("Upstream not connected")
session = self.connection_ref().get_session()
tail = session.get('tail')
if tail == None:
raise SubmitException("Connection is not subscribed")
if self.custom_user:
worker_name = self.custom_user
start = time.time()
try:
result = (yield self._f.rpc('mining.submit', [worker_name, job_id, tail+extranonce2, ntime, nonce]))
except RemoteServiceException as exc:
response_time = (time.time() - start) * 1000
log.info("[%dms] Share from '%s' REJECTED: %s" % (response_time, worker_name, str(exc)))
raise SubmitException(*exc.args)
response_time = (time.time() - start) * 1000
log.info("[%dms] Share from '%s' accepted, diff %d" % (response_time, worker_name, DifficultySubscription.difficulty))
defer.returnValue(result)
def get_transactions(self, *args):
log.warn("mining.get_transactions isn't supported by proxy")
return []
|
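The `_get_unused_tail` logic in the stratum proxy row above appends a one- or two-byte tail to `extranonce1` and shrinks `extranonce2_size` by the same amount. A small worked check of that arithmetic; `var_int` is restated here for illustration, not imported from the proxy's actual module:

```python
import binascii
import struct

def var_int(i):
    # Same packing as in stratum_listener.py above: 1 byte up to 0xff, else 2 bytes.
    if i <= 0xff:
        return struct.pack('>B', i)
    elif i <= 0xffff:
        return struct.pack('>H', i)
    raise Exception("number is too big")

extranonce2_size = 4  # hypothetical upstream value
for tail_iterator in (1, 255, 256):
    tail = var_int(tail_iterator)
    # 1 -> b'01' (3 extranonce2 bytes left), 256 -> b'0100' (2 bytes left)
    print(binascii.hexlify(tail), extranonce2_size - len(tail))
```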
gkaramanolakis/adsm | adsm/feature_extraction.py | Python | gpl-3.0 | 3,139 | 0.021982 |
import librosa
import numpy as np
import help_functions
def extract_mfccdd(fpath, n_mfcc=13, winsize=0.25, sampling_rate=16000):
'''
Compute MFCCs, first and second derivatives
:param fpath: the file path
:param n_mfcc: the number of MFCC coefficients. Default = 13 coefficients
:param winsize: the time length of the window for MFCC extraction. Default 0.25s (250ms)
:param sampling_rate: the sampling rate. The file is loaded and converted to the specified sampling rate.
:return: a 2D numpy matrix (frames * MFCCdd)
'''
help_functions.check_existence(fpath)
data, sr = librosa.load(fpath, sr=sampling_rate, mono=True)
winlen = int(2 * winsize * sr)
winstep = int(winlen / 2.0)
mfccs = librosa.feature.mfcc(y=data, sr=sr, n_mfcc=n_mfcc, n_fft=winlen, hop_length=winstep)
deltas = librosa.feature.delta(mfccs)
deltadeltas = librosa.feature.delta(deltas)
mfccdd = np.concatenate((mfccs, deltas, deltadeltas), axis=1)
return mfccdd
def extract_multiple_features(fpath, n_mfcc=13, sampling_rate=16000):
chroma_feature = librosa.feature.chroma_stft(fpath, sampling_rate) # 12
mfcc_feature = librosa.feature.mfcc(fpath, sampling_rate, n_mfcc=n_mfcc) # default = 20
rmse_feature = librosa.feature.rmse(fpath) # 1
spectral_centroid_feature = librosa.feature.spectral_centroid(fpath, sampling_rate) #1
spectral_bandwidth_feature = librosa.feature.spectral_bandwidth(fpath, sampling_rate) #1
#spectral_contrast_feature = librosa.feature.spectral_contrast(data,rate) #7
spectral_rolloff_feature = librosa.feature.spectral_rolloff(fpath, sampling_rate) #1
poly_features = librosa.feature.poly_features(fpath, sampling_rate) #2
#tonnetz_feature = librosa.feature.tonnetz(data,rate) #6
zero_crossing_rate_feature = librosa.feature.zero_crossing_rate(fpath, sampling_rate) #1
l = len(chroma_feature[0])
chroma_feature = np.reshape(chroma_feature,[l ,len(chroma_feature)])
mfcc_feature = np.reshape(mfcc_feature,[l ,len(mfcc_feature)])
rmse_feature = np.reshape(rmse_feature,[l ,len(rmse_feature)])
spectral_centroid_feature = np.reshape(spectral_centroid_feature,[l ,len(spectral_centroid_feature)])
spectral_bandwidth_feature = np.reshape(spectral_bandwidth_feature,[l ,len(spectral_bandwidth_feature)])
#spectral_contrast_feature = np.reshape(spectral_contrast_feature,[l ,len(spectral_contrast_feature)])
spectral_rolloff_feature = np.reshape(spectral_rolloff_feature,[l ,len(spectral_rolloff_feature)])
poly_features = np.reshape(poly_features,[l ,len(poly_features)])
#tonnetz_feature = np.reshape(tonnetz_feature,[l ,len(tonnetz_feature)])
zero_crossing_rate_feature = np.reshape(zero_crossing_rate_feature,[l ,len(zero_crossing_rate_feature)])
# Concatenate all features to a feature vector (length = 32)
features = np.concatenate((chroma_feature,mfcc_feature,rmse_feature,
spectral_centroid_feature,spectral_bandwidth_feature,
spectral_rolloff_feature, poly_features,
zero_crossing_rate_feature),axis=1)
return features
|
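In `extract_mfccdd` above, the STFT window and hop are derived from the requested window size in seconds. A worked check of that arithmetic for the default 16 kHz rate and 0.25 s setting; note that `winlen` is computed as `2 * winsize * sr`, so with these defaults the hop, not the FFT window, comes out at 250 ms:

```python
# Mirrors the arithmetic in extract_mfccdd above; values are the stated defaults.
sampling_rate = 16000
winsize = 0.25                               # seconds
winlen = int(2 * winsize * sampling_rate)    # 8000 samples -> 0.5 s FFT window
winstep = int(winlen / 2.0)                  # 4000 samples -> 0.25 s hop
print(winlen, winstep)
```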
mjirik/discon | discon/discon_tools.py | Python | mit | 600 | 0.003333 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
from loguru import logger
from pathlib import Path
def check_meta_yaml_for_noarch(fn:Path, text=None):
import re
logger.debug("Checking for noarch")
if text is None:
with open(fn, "rt") as fl:
text = fl.read()
mo = re.search(r"\n\s*noarch_python:\s*True", text)
if mo:
logger.info("Detected conda noarch python")
return True
mo = re.search(r"\n\s*noarch:\s*python", text)
if mo:
logger.info("Detected conda noarch python")
return True
return False
|
alfredodeza/potasio | potasio/controllers/root.py | Python | bsd-3-clause | 411 | 0 |
from pecan import expose, conf
from potasio.controllers.dashboards import DashboardController
class RootController(object):
@expose(template='index.html')
def index(self):
dashboards = conf.dashboards.to_dict()
return dict(
dashboards=dashboards.keys()
)
@expose()
def _lookup(self, name, *remainder):
return DashboardController(name), remainder
|
dvor85/kmotion | bin/reboot_cam.py | Python | gpl-3.0 | 523 | 0.001912 |
#!/usr/bin/env python
import os
import sys
def usage():
print "{0} <feed>".format(os.path.basename(__file__))
if __name__ == '__main__':
kmotion_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(kmotion_dir)
from core.camera_lost import CameraLost
feed = ''
if len(sys.argv) > 1:
feed = sys.argv[1]
cam_lost = CameraLost(kmotion_dir, feed)
if cam_lost.reboot_camera():
sys.exit()
else:
usage()
sys.exit(1)
|
nhsb1/PatternTargetFinder | ptf.py | Python | gpl-3.0 | 3,499 | 0.025722 |
from argparse import ArgumentParser
from yahoo_finance import Share
import pyperclip
import urllib2
from bs4 import BeautifulSoup
currentrelease = 'Pattern Target Finder 1.2'
#v1.2 - Added get earnings date function
#v1.1 - Get's current delayed price from Yahoo_Finance
# Allowed you to override price -p 123
# Copies output to the clipboard
#v1.0 - calculates the pattern's target
#feature - get earning date? -e ? Screen scrape; would be nice to have.
def getArgs():
parser = ArgumentParser(description = currentrelease)
parser.add_argument("-t", "--ticker", required=False, dest="ticker", help="ticker for lookup", metavar="ticker")
parser.add_argument("-p", "--price", required=False, dest="price", help="specify price", metavar="price")
parser.add_argument("-rh", "--high", required=True, dest="high", help="recent high", metavar="high")
parser.add_argument("-rl", "--low", required=True, dest="low", help="recent low", metavar="low")
parser.add_argument("-e", "--earnings", required=False, dest="earnings", help="get earnings date", action="store_true")
#parser.add_argument("-perf", "--perfcheck", required=False, dest="perfcheck", help="use with init file to check performance", action="store_true")
args = parser.parse_args()
return args
def patternmove(high, low):
move = float(high) - float(low)
return move
def patterntarget(high, move):
target = float(high) + float(move)
return target
def priceWork(stock):
"""
Returns delayed price for ticker and reporting for ticker from Yahoo_Finance module (Yahoo_finance module delayed price can be VERY delayed, hence realtime module)
"""
subpricequote = stock.get_price()
#output.append(subpricequote)
return subpricequote
#def clipboard1(ticker, price, move, target):
def earningsdate(ticker):
baseurl = 'https://www.zacks.com/stock/quote/'
endurl = ticker
url = baseurl + ticker
page = urllib2.urlopen(url)
soup = BeautifulSoup(page.read(), "lxml")
earnings = soup.find_all("td")
return earnings[58].text #item 58 gives you *AMC8/4/16 or 10/20/2016 or BMO8/4/16
#----------------------------------------------------------------------
if __name__ == "__main__":
myargs = getArgs()
if myargs.ticker is not "" and myargs.price is None: #if you don't specify -p price then it get's the current delayed price
stock = Share(myargs.ticker)
price = priceWork(stock)
elif myargs.ticker is not "" and myargs.price > 0: #if you specify price, then it uses that as the starting price.
#print myargs.ticker, myargs.price
stock = myargs.ticker
price = myargs.price
move = patternmove(myargs.high, myargs.low)
target = patterntarget(myargs.high, move)
if myargs.earnings is True:
earnings = earningsdate(myargs.ticker)
else:
earnings = None
#print target
if myargs.ticker is not "" and price is not None and earnings is not None:
print myargs.ticker + ", " + str(price) + ", " + str(target) + ", " + str(earningsdate(myargs.ticker))
newpasteitem = myargs.ticker + ", " + str(price) + ", " + str(target) + ", " + str(earningsdate(myargs.ticker))
pyperclip.copy(newpasteitem)
elif myargs.ticker is not "" and price is not None:
print myargs.ticker + ", " + str(price) + ", " + str(target)
newpasteitem = myargs.ticker + ", " + str(price) + ", " + str(target)
pyperclip.copy(newpasteitem)
else:
print target
newpasteitem = str(target)
pyperclip.copy(newpasteitem)
# if myargs.earnings is True:
# print earningsdate(myargs.ticker)
|
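The target computation in the `ptf.py` row above is a simple measured move: the pattern height (`high - low`) projected above the high. A quick numeric check with made-up prices, restating the two helpers from the row:

```python
def patternmove(high, low):
    # Height of the pattern.
    return float(high) - float(low)

def patterntarget(high, move):
    # Project the height above the breakout level.
    return float(high) + float(move)

# Hypothetical example values, not taken from any real ticker.
high, low = 52.0, 47.5
move = patternmove(high, low)       # 4.5
print(patterntarget(high, move))    # 56.5
```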
jdevesa/gists | gists/gists.py | Python | mit | 12,061 | 0 |
# Copyright (c) 2012 <Jaume Devesa (jaumedevesa@gmail.com)>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
gists.gists
~~~~~~~~~~~
This single-function module defines the input parameters and the subparsers,
and coordinates the 'handlers'->'actions'->'formatters' execution workflow
"""
import argparse
from actions import (list_gists, show, get, post, delete, update, authorize,
fork, star, unstar)
from handlers import (handle_list, handle_show, handle_update,
handle_authorize, handle_get, handle_post, handle_delete,
handle_fork, handle_star)
from formatters import (format_list, format_post, format_update,
format_get, format_show, format_delete,
format_authorize, format_star)
from version import VERSION
USER_MSG = ("github username. Use this user instead of the defined one in "
"the configuration file. If action demands authentication, a "
"password request will be prompt")
GIST_ID_MSG = ("identifier of the Gist. Execute 'gists list' to know Gists "
"identifiers")
def run(*args, **kwargs):
# Initialize argument's parser
description = 'Manage Github gists from CLI'
parser = argparse.ArgumentParser(description=description,
epilog="Happy Gisting!")
# Define subparsers to handle each action
subparsers = parser.add_subparsers(help="Available commands.")
# Add the subparsers
__add_list_parser(subparsers)
__add_show_parser(subparsers)
__add_get_parser(subparsers)
__add_create_parser(subparsers)
__add_update_parser(subparsers)
__add_delete_parser(subparsers)
__add_authorize_parser(subparsers)
__add_version_parser(subparsers)
__add_fork_parser(subparsers)
__add_star_parser(subparsers)
__add_unstar_parser(subparsers)
# Parse the arguments
args = parser.parse_args()
# Calling the handle_args function defined, parsing the args and return
# and object with the needed values to execute the function
parameters = args.handle_args(args)
# Passing the 'parameters' object as array of parameters
result = args.func(*parameters)
# Parsing the 'result' object to be output formatted.
# (that must be a single object)
result_formatted = args.formatter(result)
# Print the formatted output
print result_formatted
def __add_list_parser(subparsers):
""" Define the subparser to handle the 'list' functionality.
:param subparsers: the subparser entity
"""
# Add the subparser to handle the list of gists
parser_list = subparsers.add_parser("list", help="list a user's Gists")
parser_list.add_argument("-u", "--user", help=USER_MSG)
group1 = parser_list.add_mutually_exclusive_group()
group1.add_argument("-p", "--private", help="""return the private gists
besides the public ones. Needs authentication""",
action="store_true")
group1.add_argument("-s", "--starred", help="""return ONLY the starred
gists. Needs authentication""", action="store_true")
parser_list.set_defaults(handle_args=handle_list,
func=list_gists, formatter=format_list)
def __add_show_parser(subparsers):
""" Define the subparser to handle with the 'show' functionallity.
:param subparsers: the subparser entity
"""
# Add the subparser to handle the 'show' action
parser_show = subparsers.add_parser("show", help="""show a Gist. Shows
Gist metadata by default.
With '-f' (--filename) option, shows
the content of one of the Gist files
""")
parser_show.add_argument("gist_id", help=GIST_ID_MSG)
parser_show.add_argument("-f", "--filename", help="gist file to show")
parser_show.set_defaults(handle_args=handle_show, func=show,
formatter=format_show)
def __add_get_parser(subparsers):
""" Define the subparser to handle the 'get' functionality.
:param subparsers: the subparser entity
"""
# Add the subparser to handle the 'get' action
parser_get = subparsers.add_parser("get", help="""download a single gist
file. If the gist has just a single
file, argument '-f' (--filename) is not
needed""")
parser_get.add_argument("gist_id", help=GIST_ID_MSG)
parser_get.add_argument("-f", "--filename", help="file to download")
parser_get.add_argument("-o", "--output_dir", help="destination directory",
default=".")
parser_get.set_defaults(handle_args=handle_get, func=get,
formatter=format_get)
def __add_create_parser(subparsers):
""" Define the subparser to handle the 'create' functionality.
:param subparsers: the subparser entity
"""
# Add the subparser to handle the 'create' action
parser_post = subparsers.add_parser("create", help="""create a new gist.
Needs authentication""")
parser_post.add_argument("-u", "--user", help=USER_MSG)
parser_post.add_argument("-f", "--filenames", nargs='+', help="""specify
files to upload with Gist creation""",
required=True)
parser_post.add_argument("-p", "--private", help="""private Gist? ('false'
by default)""", action="store_true")
parser_post.add_argument("-i", "--input_dir", help="""input directory where
the source files are""")
parser_post.add_argument("-d", "--description", help="""description for
the Gist to create""")
parser_post.set_defaults(handle_args=handle_post, func=post,
formatter=format_post)
def __add_update_parser(subparsers):
""" Define the subparser to handle the 'update' functionality.
:param subparsers: the subparser entity
"""
# Add the subparser to handle the 'update' action
parser_update = subparsers.add_parser("update", help="""update a gist.
Needs authentication""")
parser_update.add_argument("gist_id", help=GIST_ID_MSG)
parser_update.add_argument("-u", "--user", help=USER_MSG)
group1 = parser_update.add_argument_group("file options",
"update Gist files")
group1.add_argument("-f", "--filenames", nargs='+',
help="Gist files to update")
group11 = group1.add_mutually_exclusive_group()
group11.add_argument("-n", "--new", action="store_true", help="""files
supplied are new for the Gist. '-f' (--filenames)
argument needed""",
default=False)
group11.add_argument("-r", "--remove", action="store_true",
|
chromium/chromium | third_party/tensorflow-text/src/tensorflow_text/python/keras/layers/todense_test.py | Python | bsd-3-clause | 8,393 | 0.002264 |
# coding=utf-8
# Copyright 2021 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ToDense Keras layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from keras import keras_parameterized
from keras import testing_utils
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow_text.python.keras.layers.todense import ToDense
class Final(tf.keras.layers.Layer):
"""This is a helper layer that can be used as the last layer in a network for testing purposes."""
def call(self, inputs):
return tf.dtypes.cast(inputs, tf.dtypes.float32)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super(Final, self).get_config()
return dict(list(base_config.items()))
def get_input_dataset(in_data, out_data=None):
batch_size = in_data.shape[0]
if out_data is None:
return tf.data.Dataset.from_tensor_slices(in_data).batch(
batch_size)
return tf.data.Dataset.from_tensor_slices(
(in_data, out_data)).batch(batch_size)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class RaggedTensorsToDenseLayerTest(keras_parameterized.TestCase):
def SKIP_test_ragged_input_default_padding(self):
input_data = get_input_dataset(
tf.ragged.constant([[1, 2, 3, 4, 5], [2, 3]]))
expected_output = np.array([[1, 2, 3, 4, 5], [2, 3, 0, 0, 0]])
layers = [ToDense(), Final()]
model = testing_utils.get_model_from_layers(
layers,
input_shape=(None,),
input_ragged=True,
input_dtype=tf.dtypes.int32)
model.compile(
optimizer="sgd",
loss="mse",
metrics=["accuracy"],
run_eagerly=testing_utils.should_run_eagerly())
output = model.predict(input_data)
self.assertAllEqual(output, expected_output)
def SKIP_test_ragged_input_with_padding(self):
input_data = get_input_dataset(
tf.ragged.constant([[[1, 2, 3, 4, 5]], [[2], [3]]]))
expected_output = np.array([[[1., 2., 3., 4., 5.],
[-1., -1., -1., -1., -1.]],
[[2., -1., -1., -1., -1.],
[3., -1., -1., -1., -1.]]])
layers = [ToDense(pad_value=-1), Final()]
model = testing_utils.get_model_from_layers(
layers,
input_shape=(None, None),
input_ragged=True,
input_dtype=tf.dtypes.int32)
model.compile(
optimizer="sgd",
loss="mse",
metrics=["accuracy"],
run_eagerly=testing_utils.should_run_eagerly())
output = model.predict(input_data)
self.assertAllEqual(output, expected_output)
def test_ragged_input_pad_and_mask(self):
input_data = tf.ragged.constant([[1, 2, 3, 4, 5], []])
expected_mask = np.array([True, False])
output = ToDense(pad_value=-1, mask=True)(input_data)
self.assertTrue(hasattr(output, "_keras_mask"))
self.assertIsNot(output._keras_mask, None)
self.assertAllEqual(
tf.keras.backend.get_value(output._keras_mask), expected_mask)
def test_ragged_input_shape(self):
input_data = get_input_dataset(
tf.ragged.constant([[1, 2, 3, 4, 5], [2, 3]]))
expected_output = np.array([[1, 2, 3, 4, 5, 0, 0], [2, 3, 0, 0, 0, 0, 0]])
layers = [ToDense(shape=[2, 7]), Final()]
model = testing_utils.get_model_from_layers(
layers,
input_shape=(None,),
input_ragged=True,
input_dtype=tf.dtypes.int32)
model.compile(
optimizer="sgd",
loss="mse",
metrics=["accuracy"],
run_eagerly=testing_utils.should_run_eagerly())
output = model.predict(input_data)
self.assertAllEqual(output, expected_output)
@parameterized.named_parameters(
*test_util.generate_combinations_with_testcase_name(layer=[
tf.keras.layers.SimpleRNN, tf.compat.v1.keras.layers.GRU,
tf.compat.v1.keras.layers.LSTM, tf.keras.layers.GRU,
tf.keras.layers.LSTM
]))
def SKIP_test_ragged_input_RNN_layer(self, layer):
input_data = get_input_dataset(
tf.ragged.constant([[1, 2, 3, 4, 5], [5, 6]]))
layers = [
ToDense(pad_value=7, mask=True),
tf.keras.layers.Embedding(8, 16),
layer(16),
tf.keras.layers.Dense(3, activation="softmax"),
tf.keras.layers.Dense(1, activation="sigmoid")
]
model = testing_utils.get_model_from_layers(
layers,
input_shape=(None,),
input_ragged=True,
input_dtype=tf.dtypes.int32)
model.compile(
optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"],
run_eagerly=testing_utils.should_run_eagerly())
output = model.predict(input_data)
self.assertAllEqual(np.zeros((2, 1)).shape, output.shape)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class SparseTensorsToDenseLayerTest(keras_parameterized.TestCase):
def SKIP_test_sparse_input_default_padding(self):
input_data = get_input_dataset(
tf.sparse.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]))
expected_output = np.array([[1., 0., 0., 0.], [0., 0., 2., 0.],
[0., 0., 0., 0.]])
layers = [ToDense(), Final()]
model = testing_utils.get_model_from_layers(
layers,
input_shape=(None,),
input_sparse=True,
input_dtype=tf.dtypes.int32)
model.compile(
optimizer="sgd",
loss="mse",
metrics=["accuracy"],
run_eagerly=testing_utils.should_run_eagerly())
output = model.predict(input_data)
self.assertAllEqual(output, expected_output)
def SKIP_test_sparse_input_with_padding(self):
input_data = get_input_dataset(
tf.sparse.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]))
expected_output = np.array([[1., -1., -1., -1.], [-1., -1., 2., -1.],
[-1., -1., -1., -1.]])
layers = [ToDense(pad_value=-1, trainable=False), Final()]
model = testing_utils.get_model_from_layers(
layers,
input_shape=(None,),
input_sparse=True,
input_dtype=tf.dtypes.int32)
model.compile(
optimizer="sgd",
loss="mse",
metrics=["accuracy"],
run_eagerly=testing_utils.should_run_eagerly())
output = model.predict(input_data)
self.assertAllEqual(output, expected_output)
def test_sparse_input_pad_and_mask(self):
input_data = tf.sparse.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
expected_mask = np.array([True, True, False])
output = ToDense(pad_value=-1, mask=True)(input_data)
self.assertTrue(hasattr(output, "_keras_mask"))
self.assertIsNot(output._keras_mask, None)
self.assertAllEqual(
tf.keras.backend.get_value(output._keras_mask), expected_mask)
def test_sparse_input_shape(self):
input_data = get_input_dataset(
tf.sparse.SparseTensor(
indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4]))
expected_output = np.array([[1., 0., 0., 0.], [0., 0., 2., 0.],
[0., 0., 0., 0.]])
layers = [ToDense(shape=[3, 4]), Final()]
model = testing_utils.get_model_from_layers(
layers,
input_shape=(None,),
input_sparse=True,
input_dtype=tf.dtypes.int32)
model.compile(
optimizer="sgd",
loss="mse",
|
rave-engine/rave | tests/support/filesystem.py | Python | bsd-2-clause | 4,963 | 0.006045 |
import os
import codecs
from io import StringIO
from pytest import fixture
from rave import filesystem
class DummyProvider:
def __init__(self, files):
self.files = files;
def list(self):
return self.files
def has(self, filename):
return filename in self.list()
def open(self, filename, *args, **kwargs):
if not self.has(filename):
raise filesystem.FileNotFound(filename)
if not self.isfile(filename):
raise filesystem.NotAFile(filename)
return DummyFile(self, filename)
def isfile(self, filename):
return self.has(filename) and '.' in filename
def isdir(self, filename):
return self.has(filename) and not self.isfile(filename)
class FaultyProvider(DummyProvider):
def __init__(self, files, faulty_files, err=filesystem.FileNotFound):
super().__init__(files)
self.faulty_files = faulty_files
self.error_class = err
def open(self, filename, *args, **kwargs):
if filename in self.faulty_files:
raise self.error_class(filename)
return super().open(filename, *args, **kwargs)
class DummyFile(filesystem.File):
def __init__(self, parent, filename, content='merry saltmas'):
self.parent = parent
self.filename = filename
self._buffer = StringIO(content)
self._closed = False
def close(self):
if self._closed:
raise filesystem.FileClosed(self.filename)
self._closed = True
def opened(self):
return not self._closed
def readable(self):
return True
def writable(self):
return True
def seekable(self):
return True
def read(self, amount=None):
if self.closed:
raise filesystem.FileClosed(self.filename)
return self._buffer.read(amount)
def write(self, buffer):
if self.closed:
raise filesystem.FileClosed(self.filename)
return self._buffer.write(buffer)
def seek(self, offset, mode=os.SEEK_CUR):
return self._buffer.seek(offset, mode)
def tell(self):
return self._buffer.tell()
class DummyTransformer:
CONSUME = False
RELATIVE = False
def __init__(self, filename, handle):
self.filename = filename
self.handle = handle
self.files = [ self.filename + '.rot13' ]
def list(self):
return self.files
def has(self, filename):
return filename in self.list()
def open(self, filename, *args, **kwargs):
if not self.has(filename):
raise filesystem.FileNotFound(filename)
return ROT13File(self, filename, self.handle)
def isfile(self, filename):
return self.has(filename)
def isdir(self, filename):
return False
def relative(self):
return self.RELATIVE
def consumes(self):
return self.CONSUME
def valid(self):
return True
class FaultyTransformer:
def __init__(self, filename, handle):
raise FileNotFound(filename)
class InvalidTransformer(DummyTransformer):
def valid(self):
return False
class ROT13File(filesystem.File):
def __init__(self, parent, filename, handle):
self.parent = parent
self.filename = filename
self.handle = handle
def close(self):
return self.handle.close()
def opened(self):
return self.handle.opened()
def readable(self):
return self.handle.readable()
def writable(self):
return self.handle.writable()
def seekable(self):
return self.handle.seekable()
def read(self, amount=None):
return codecs.encode(self.handle.read(amount), 'rot13')
def write(self, buffer):
return self.handle.write(codecs.encode(buffer, 'rot13'))
def seek(self, offset, mode=os.SEEK_CUR):
return self.handle.seek(offset, mode)
def tell(self):
return self.handle.tell()
@fixture
def fs():
return filesystem.FileSystem()
@fixture
def dummyfs():
fs = filesystem.FileSystem()
fs.mount('/x', DummyProvider({ '/a.txt', '/b.png' }))
return fs
@fixture
def nestedfs():
fs = filesystem.FileSystem()
fs.mount('/x', DummyProvider({ '/y', '/y/c.txt', '/y/p.png', '/y/z' }))
return fs
@fixture
def parentlessfs():
fs = filesystem.FileSystem()
fs.mount('/x', DummyProvider({ '/z/k.txt' }))
return fs
@fixture
def doublefs():
fs = filesystem.FileSystem()
fs.mount('/x', DummyProvider({ '/a.txt', '/b.png' }))
fs.mount('/y', DummyProvider({ '/c.exe', '/d.jpg' }))
return fs
@fixture
def mergedfs():
fs = filesystem.FileSystem()
fs.mount('/x', DummyProvider({ '/a.txt', '/b.png' }))
fs.mount('/x', DummyProvider({ '/c.exe', '/d.jpg' }))
return fs
@fixture
def transfs():
fs = dummyfs()
fs.transform('\.txt$', DummyTransformer)
return fs
|
reddec/pika | pika/adapters/select_connection.py | Python | bsd-3-clause | 21,137 | 0.000378 |
"""A connection adapter that tries to use the best polling method for the
platform pika is running on.
"""
import os
import logging
import socket
import select
import errno
import time
from operator import itemgetter
from collections import defaultdict
import threading
import pika.compat
from pika.compat import dictkeys
from pika.adapters.base_connection import BaseConnection
LOGGER = logging.getLogger(__name__)
# One of select, epoll, kqueue or poll
SELECT_TYPE = None
# Use epoll's constants to keep life easy
READ = 0x0001
WRITE = 0x0004
ERROR = 0x0008
if pika.compat.PY2:
_SELECT_ERROR = select.error
else:
# select.error was deprecated and replaced by OSError in python 3.3
_SELECT_ERROR = OSError
def _get_select_errno(error):
if pika.compat.PY2:
assert isinstance(error, select.error), repr(error)
return error.args[0]
else:
assert isinstance(error, OSError), repr(error)
return error.errno
class SelectConnection(BaseConnection):
"""An asynchronous connection adapter that attempts to use the fastest
event loop adapter for the given platform.
"""
def __init__(self,
parameters=None,
on_open_callback=None,
on_open_error_callback=None,
on_close_callback=None,
stop_ioloop_on_close=True,
custom_ioloop=None):
"""Create a new instance of the Connection object.
:param pika.connection.Parameters parameters: Connection parameters
:param method on_open_callback: Method to call on connection open
:param on_open_error_callback: Method to call if the connection cant
be opened
:type on_open_error_callback: method
:param method on_close_callback: Method to call on connection close
:param bool stop_ioloop_on_close: Call ioloop.stop() if disconnected
:param custom_ioloop: Override using the global IOLoop in Tornado
:raises: RuntimeError
"""
ioloop = custom_ioloop or IOLoop()
super(SelectConnection, self).__init__(parameters, on_open_callback,
on_open_error_callback,
on_close_callback, ioloop,
stop_ioloop_on_close)
def _adapter_connect(self):
"""Connect to the RabbitMQ broker, returning True on success, False
on failure.
:rtype: bool
"""
error = super(SelectConnection, self)._adapter_connect()
if not error:
self.ioloop.add_handler(self.socket.fileno(), self._handle_events,
self.event_state)
return error
def _adapter_disconnect(self):
"""Disconnect from the RabbitMQ broker"""
if self.socket:
self.ioloop.remove_handler(self.socket.fileno())
super(SelectConnection, self)._adapter_disconnect()
class IOLoop(object):
"""Singlton wrapper that decides which type of poller to use, creates an
instance of it in start_poller and keeps the invoking application in a
blocking state by calling the pollers start method. Poller should keep
looping until IOLoop.instance().stop() is called or there is a socket
error.
Passes through all operations to the loaded poller object.
"""
def __init__(self):
self._poller = self._get_poller()
def __getattr__(self, attr):
return getattr(self._poller, attr)
def _get_poller(self):
"""Determine the best poller to use for this enviroment."""
poller = None
if hasattr(select, 'epoll'):
if not SELECT_TYPE or SELECT_TYPE == 'epoll':
LOGGER.debug('Using EPollPoller')
poller = EPollPoller()
if not poller and hasattr(select, 'kqueue'):
if not SELECT_TYPE or SELECT_TYPE == 'kqueue':
LOGGER.debug('Using KQueuePoller')
poller = KQueuePoller()
if (not poller and hasattr(select, 'poll') and
hasattr(select.poll(), 'modify')): # pylint: disable=E1101
if not SELECT_TYPE or SELECT_TYPE == 'poll':
LOGGER.debug('Using PollPoller')
poller = PollPoller()
if not poller:
LOGGER.debug('Using SelectPoller')
poller = SelectPoller()
return poller
class SelectPoller(object):
"""Default behavior is to use Select since it's the widest supported and has
all of the methods we need for child classes as well. One should only need
to override the update_handler and start methods for additional types.
"""
# Drop out of the poll loop every POLL_TIMEOUT secs as a worst case, this
# is only a backstop value. We will run timeouts when they are scheduled.
POLL_TIMEOUT = 5
# if the poller uses MS specify 1000
POLL_TIMEOUT_MULT = 1
def __init__(self):
"""Create an instance of the SelectPoller
"""
# fd-to-handler function mappings
self._fd_handlers = dict()
# event-to-fdset mappings
self._fd_events = {READ: set(), WRITE: set(), ERROR: set()}
self._stopping = False
self._timeouts = {}
self._next_timeout = None
self._processing_fd_event_map = {}
# Mutex for controlling critical sections where ioloop-interrupt sockets
# are created, used, and destroyed. Needed in case `stop()` is called
# from a thread.
self._mutex = threading.Lock()
# ioloop-interrupt socket pair; initialized in start()
self._r_interrupt = None
self._w_interrupt = None
def get_interrupt_pair(self):
""" Use a socketpair to be able to interrupt the ioloop if called
from another thread. Socketpair() is not supported on some OS (Win)
so use a pair of simple UDP sockets instead. The sockets will be
closed and garbage collected by python when the ioloop itself is.
"""
try:
read_sock, write_sock = socket.socketpair()
except AttributeError:
LOGGER.debug("Using custom socketpair for interrupt")
read_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
read_sock.bind(('localhost', 0))
write_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
write_sock.connect(read_sock.getsockname())
read_sock.setblocking(0)
write_sock.setblocking(0)
return read_sock, write_sock
def read_interrupt(self, interrupt_sock,
events, write_only): # pylint: disable=W0613
""" Read the interrupt byte(s). We ignore the event mask and write_only
flag as we can ony get here if there's data to be read on our fd.
:param int interrupt_sock: The file descriptor to read from
:param int events: (unused) The events generated for this fd
:param bool write_only: (unused) True if poll was called to trigger a
write
"""
try:
os.read(interrupt_sock, 512)
except OSError as err:
if err.errno != errno.EAGAIN:
raise
def add_timeout(self, deadline, callback_method):
"""Add the callback_method to the IOLoop timer to fire after deadline
seconds. Returns a handle to the timeout. Do not confuse with
Tornado's timeout where you pass in the time you want to have your
callback called. Only pass in the seconds until it's to be called.
:param int deadline: The number of seconds to wait to call callback
:param method callback_method: The callback method
:rtype: str
"""
timeout_at = time.time() + deadline
value = {'deadline': timeout_at, 'callback': callback_method}
timeout_id = hash(frozenset(value.items()))
self._timeouts[timeout_id] = value
if not self._next_timeout or timeout_at < self._next_timeout:
self._next_timeout = timeout_at
return timeout_id
|
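`get_interrupt_pair` in the pika row above falls back to a pair of connected UDP sockets on platforms where `socket.socketpair()` is unavailable. A standalone sketch of that fallback for a quick local test; this is an illustration, not the adapter's actual module:

```python
import select
import socket

def udp_interrupt_pair():
    # Fallback used when socket.socketpair() is unavailable: two UDP sockets
    # on localhost, one bound to an ephemeral port and one connected to it.
    read_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    read_sock.bind(('localhost', 0))
    write_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    write_sock.connect(read_sock.getsockname())
    read_sock.setblocking(False)
    write_sock.setblocking(False)
    return read_sock, write_sock

r, w = udp_interrupt_pair()
w.send(b'X')                                   # wake the "ioloop" up
ready, _, _ = select.select([r], [], [], 1.0)  # wait briefly for the datagram
if ready:
    print(r.recv(512))                         # b'X'
```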
spellrun/Neural-Photo-Editor | gan/sample_ian.py | Python | mit | 5,521 | 0.011411 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from collections import OrderedDict
import imp
import time
import logging
import itertools
import os
import numpy as np
from path import Path
import theano
import theano.tensor as T
import lasagne
from fuel.datasets import CelebA
from gan.util import (
from_tanh,
to_tanh,
gan_checkpoints,
)
from gan.util.constants import DEFAULT_RES_DIR
from gan.util.discgen_utils import plot_image_grid
### Make Training Functions Method
# This function defines and compiles the computational graphs that define the training, validation, and test functions.
def make_training_functions(cfg,model):
# Define input tensors
# Tensor axes are batch-channel-dim1-dim2
is_iaf = 'l_Z_IAF' in model.keys()
# Image Input
X = T.TensorType('float32', [False]*4)('X')
# Latent Input, for providing latent values from the main function
Z = T.TensorType('float32', [False]*2)('Z') # Latents
# Model layers
l_in = model['l_in']
l_out = model['l_out']
l_Z = model['l_Z']
l_Z_IAF = l_Z
l_mu = model['l_mu']
l_ls = model['l_ls']
l_introspect = model['l_introspect']
l_discrim = model['l_discrim']
if is_iaf:
l_Z_IAF = model['l_Z_IAF']
l_IAF_mu = model['l_IAF_mu']
l_IAF_ls = model['l_IAF_ls']
# Sample function
sample = theano.function([Z],lasagne.layers.get_output(l_out,{l_Z_IAF:Z},deterministic=True),on_unused_input='warn')
sampleZ = theano.function([Z],lasagne.layers.get_output(l_out,{l_Z:Z},deterministic=True),on_unused_input='warn')
# Inference Function--Infer non-IAF_latents given an input X
Zfn = theano.function([X],lasagne.layers.get_output(l_Z_IAF,{l_in:X},deterministic=True),on_unused_input='warn')
# Dictionary of Theano Functions
tfuncs = {
'sample': sample,
'sampleZ': sampleZ,
'Zfn': Zfn,
}
if is_iaf:
# IAF function--Infer IAF latents given a latent input Z
Z_IAF_fn = theano.function([Z],lasagne.layers.get_output(l_Z,{l_Z_IAF:Z},deterministic=True),on_unused_input='warn')
tfuncs['Z_IAF_fn'] = Z_IAF_fn
# Dictionary of Theano Variables
tvars = {'X' : X,
'Z' : Z}
return tfuncs, tvars, model
# Main Function
def main(args):
# Load Config Module from source file
model_name = os.path.basename(args.config_path)[:-3]
config_module = imp.load_source(model_name, args.config_path)
# Create directory for storing results
res_dir = os.path.join(DEFAULT_RES_DIR, model_name)
if not os.path.isdir(res_dir):
os.makedirs(res_dir)
# Get configuration parameters
cfg = config_module.CFG
# Define name of npz file to which the model parameters will be saved
if args.weights_file is None:
weights_fname = os.path.join(res_dir, "weights.npz")
else:
weights_fname = args.weights_file
model = config_module.get_model(interp=False)
print('Compiling theano functions...')
# Compile functions
tfuncs, tvars,model = make_training_functions(cfg, model)
# Test set for interpolations
test_set = CelebA('64', ('test',), sources=('features',))
# Loop across epochs
offset = True
params = list(set(lasagne.layers.get_all_params(model['l_out'],trainable=True)+\
lasagne.layers.get_all_params(model['l_discrim'],trainable=True)+\
[ x for x in lasagne.layers.get_all_params(model['l_out'])+\
lasagne.layers.get_all_params(model['l_discrim']) if x.name[-4:]=='mean' or x.name[-7:]=='inv_std']))
metadata = gan_checkpoints.load_weights(weights_fname, params)
epoch = args.epoch if args.epoch>0 else metadata['epoch'] if 'epoch' in metadata else 0
print('loading weights, epoch is '+str(epoch))
try:
model['l_IAF_mu'].reset("Once")
model['l_IAF_ls'].reset("Once")
except KeyError:
pass
# Open Test Set
test_set.open()
# Generate Random Samples, averaging latent vectors across masks
latent_idx = np.random.randn(27,cfg['num_latents']).astype(np.float32)
samples = np.uint8(from_tanh(tfuncs['sample'](latent_idx)))
# Get Reconstruction/Interpolation Endpoints
sample_idx = list(np.random.choice(test_set.num_examples, 6, replace=False))
endpoints = np.uint8(test_set.get_data(request=sample_idx)[0])
Ze = np.asarray(tfuncs['Zfn'](to_tanh(np.float32(endpoints))))
Z = np.asarray([ Ze[2*i, :] * (1-j) + Ze[2*i+1, :]*j
for i in xrange(3)
for j in [x/6.0 for x in xrange(7)] ], dtype=np.float32)
interp = np.concatenate([ np.insert(endpoints[2*i:2*(i+1),:,:,:], 1,
                                              np.uint8(from_tanh(tfuncs['sample'](Z[7*i:7*(i+1),:]))),axis=0)
for i in range(3) ], axis=0)
# Get all images
images = np.append(samples, interp, axis=0)
# Plot images
pics_dir = os.path.join(res_dir, "pics")
if not os.path.isdir(pics_dir):
os.makedirs(pics_dir)
img_fname = os.path.join(pics_dir, "{}_{}.png".format(model_name, epoch))
plot_image_grid(images, 6, 9)
# Close test set
test_set.close(state=None)
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('config_path', type=Path, help='config .py file')
    parser.add_argument('-w', "--weights-file", help='weights file')
    # main() reads args.epoch, so expose it as an optional argument (0 falls back
    # to the epoch stored in the checkpoint metadata).
    parser.add_argument('-e', "--epoch", type=int, default=0, help='epoch label for output files')
args = parser.parse_args()
main(args)
|
pmghalvorsen/gramps_branch
|
gramps/plugins/gramplet/repositorydetails.py
|
Python
|
gpl-2.0
| 5,196
| 0.002694
|
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2011 Nick Hall
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from gramps.gen.lib import UrlType
from gramps.gen.plug import Gramplet
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gi.repository import Gtk
from gi.repository import Pango
class RepositoryDetails(Gramplet):
"""
Displays details for a repository.
"""
def init(self):
self.gui.WIDGET = self.build_gui()
self.gui.get_container_widget().remove(self.gui.textview)
self.gui.get_container_widget().add_with_viewport(self.gui.WIDGET)
def build_gui(self):
"""
Build the GUI interface.
"""
self.top = Gtk.HBox()
vbox = Gtk.VBox()
self.name = Gtk.Label()
self.name.set_alignment(0, 0)
self.name.modify_font(Pango.FontDescription('sans bold 12'))
vbox.pack_start(self.name, fill=True, expand=False, padding=7)
self.table = Gtk.Table(n_rows=1, n_columns=2)
vbox.pack_start(self.table, fill=True, expand=False, padding=0)
self.top.pack_start(vbox, fill=True, expand=False, padding=10)
self.top.show_all()
return self.top
def add_row(self, title, value):
"""
Add a row to the table.
"""
label = Gtk.Label(label=title + ':')
label.set_alignment(1, 0)
label.show()
value = Gtk.Label(label=value)
value.set_alignment(0, 0)
value.show()
rows = self.table.get_property('n-rows')
rows += 1
self.table.resize(rows, 2)
self.table.attach(label, 0, 1, rows, rows + 1,
xoptions=Gtk.AttachOptions.FILL, xpadding=10)
self.table.attach(value, 1, 2, rows, rows + 1)
def clear_table(self):
"""
Remove all the rows from the table.
"""
list(map(self.table.remove, self.table.get_children()))
self.table.resize(1, 2)
def db_changed(self):
self.dbstate.db.connect('repository-update', self.update)
self.connect_signal('Repository', self.update)
def update_has_data(self):
        active_handle = self.get_active('Repository')
        active_repo = self.dbstate.db.get_repository_from_handle(active_handle)
        self.set_has_data(active_repo is not None)
def main(self):
active_handle = self.get_active('Repository')
repo = self.dbstate.db.get_repository_from_handle(active_handle)
self.top.hide()
if repo:
self.display_repo(repo)
self.set_has_data(True)
else:
self.display_empty()
self.set_has_data(False)
self.top.show()
def display_repo(self, repo):
"""
Display details of the active repository.
"""
self.name.set_text(repo.get_name())
self.clear_table()
address_list = repo.get_address_list()
if len(address_list) > 0:
self.display_address(address_list[0])
self.display_separator()
phone = address_list[0].get_phone()
if phone:
self.add_row(_('Phone'), phone)
self.display_url(repo, UrlType(UrlType.EMAIL))
self.display_url(repo, UrlType(UrlType.WEB_HOME))
self.display_url(repo, UrlType(UrlType.WEB_SEARCH))
self.display_url(repo, UrlType(UrlType.WEB_FTP))
def display_address(self, address):
"""
Display an address.
"""
lines = [line for line in address.get_text_data_list()[:-1] if line]
        self.add_row(_('Address'), '\n'.join(lines))
def display_url(self, repo, url_type):
"""
Display an url of the given url type.
"""
for url in repo.get_url_list():
if url.get_type() == url_type:
self.add_row(str(url_type), url.get_path())
def display_empty(self):
"""
Display empty details when no repository is selected.
"""
self.name.set_text('')
self.clear_table()
def display_separator(self):
"""
        Display an empty row to separate groups of entries.
"""
label = Gtk.Label(label='')
label.modify_font(Pango.FontDescription('sans 4'))
label.show()
rows = self.table.get_property('n-rows')
rows += 1
self.table.resize(rows, 2)
self.table.attach(label, 0, 1, rows, rows + 1,
xoptions=Gtk.AttachOptions.FILL)
|
archetipo/server-tools
|
users_ldap_groups/users_ldap_groups_operators.py
|
Python
|
agpl-3.0
| 2,298
| 0
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2012 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from string import Template
class LDAPOperator:
pass
class contains(LDAPOperator):
def check_value(self,
ldap_entry,
attribute, value,
ldap_config,
company,
logger):
return (attribute in ldap_entry[1] and
value in ldap_entry[1][attribute])
class equals(LDAPOperator):
def check_value(self,
ldap_entry,
attribute, value,
ldap_config,
company,
logger):
return (attribute in ldap_entry[1] and
unicode(value) == unicode(ldap_entry[1][attribute]))
class query(LDAPOperator):
def check_value(self,
ldap_entry,
attribute,
value,
ldap_config,
company,
logger):
query_string = Template(value).safe_substitute(dict(
            [(attr, ldap_entry[1][attr][0]) for attr in ldap_entry[1]]
))
logger.debug('evaluating query group mapping, filter: %s',
query_string)
results = company.query(ldap_config, query_string)
logger.debug(results)
return bool(results)
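# A rough sketch of how these operators are evaluated (the ldap_entry tuple
# below is made up for illustration; real entries come from the LDAP search and
# the remaining arguments are supplied by the group-mapping code):
#
#     entry = ('cn=jdoe,dc=example,dc=com', {'memberOf': ['cn=sales']})
#     contains().check_value(entry, 'memberOf', 'cn=sales',
#                            ldap_config, company, logger)   # -> True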
|
aronsky/home-assistant
|
tests/components/modern_forms/test_init.py
|
Python
|
apache-2.0
| 1,806
| 0.000554
|
"""Tests for the Modern Forms integration."""
from unittest.mock import MagicMock, patch
from aiomodernforms import ModernFormsConnectionError
from homeassistant.components.modern_forms.const import DOMAIN
from homeassistant.config_entries import ConfigEntryState
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from tests.components.modern_forms import (
init_integration,
modern_forms_no_light_call_mock,
)
from tests.test_util.aiohttp import AiohttpClientMocker
@patch(
"homeassistant.components.modern_forms.ModernFormsDevice.update",
side_effect=ModernFormsConnectionError,
)
async def test_config_entry_not_ready(
mock_update: MagicMock, hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the Modern Forms configuration entry not ready."""
entry = await init_integration(hass, aioclient_mock)
assert entry.state is ConfigEntryState.SETUP_RETRY
async def test_unload_config_entry(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the Modern Forms configuration entry unloading."""
entry = await init_integration(hass, aioclient_mock)
assert hass.data[DOMAIN]
await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert not hass.data.get(DOMAIN)
async def test_fan_only_device(hass, aioclient_mock):
"""Test we set unique ID if not set yet."""
await init_integration(
hass, aioclient_mock, mock_type=modern_forms_no_light_call_mock
)
entity_registry = er.async_get(hass)
    fan_entry = entity_registry.async_get("fan.modernformsfan_fan")
assert fan_entry
light_entry = entity_registry.async_get("light.modernformsfan_lig
|
ht")
assert light_entry is None
|
mhvk/astropy
|
astropy/cosmology/tests/test_utils.py
|
Python
|
bsd-3-clause
| 2,328
| 0.001289
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from math import inf
import pytest
import numpy as np
from astropy.cosmology.utils import inf_like, vectorize_if_needed, vectorize_redshift_method
from astropy.utils.exceptions import AstropyDeprecationWarning
def test_vectorize_redshift_method():
"""Test :func:`astropy.cosmology.utils.vectorize_redshift_method`."""
class Class:
@vectorize_redshift_method
def method(self, z):
return z
c = Class()
assert hasattr(c.method, "__vectorized__")
assert isinstance(c.method.__vectorized__, np.vectorize)
# calling with Number
assert c.method(1) == 1
assert isinstance(c.method(1), int)
# calling with a numpy scalar
assert c.method(np.float64(1)) == np.float64(1)
assert isinstance(c.method(np.float64(1)), np.float64)
# numpy array
assert all(c.method(np.array([1, 2])) == np.array([1, 2]))
assert isinstance(c.method(np.array([1, 2])), np.ndarray)
# non-scalar
assert all(c.method([1, 2]) == np.array([1, 2]))
assert isinstance(c.method([1, 2]), np.ndarray)
def test_vectorize_if_needed():
"""
Test :func:`astropy.cosmology.utils.vectorize_if_needed`.
    There's no need to test 'veckw' because that is directly passed to
`numpy.vectorize` which thoroughly tests the various inputs.
"""
func = lambda x: x ** 2
with pytest.warns(AstropyDeprecationWarning):
# not vectorized
assert vectorize_if_needed(func, 2) == 4
# vectorized
        assert all(vectorize_if_needed(func, [2, 3]) == [4, 9])
@pytest.mark.parametrize("arr, expected",
[(0.0, inf), # float scalar
                          (1, inf),  # integer scalar should give float output
([0.0, 1.0, 2.0, 3.0], (inf, inf, inf, inf)),
([0, 1, 2, 3], (inf, inf, inf, inf)), # integer list
])
def test_inf_like(arr, expected):
"""
Test :func:`astropy.cosmology.utils.inf_like`.
All inputs should give a float output.
These tests are also in the docstring, but it's better to have them also
in one consolidated location.
"""
with pytest.warns(AstropyDeprecationWarning):
assert np.all(inf_like(arr) == expected)
|
felixbuenemann/sentry
|
src/sentry/rules/actions/notify_event.py
|
Python
|
bsd-3-clause
| 1,345
| 0.000743
|
"""
sentry.rules.actions.notify_event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from sentry.plugins import plugins
from sentry.rules.actions.base import EventAction
from sentry.utils import metrics
from sentry.utils.safe import safe_execute
class NotifyEventAction(EventAction):
label = 'Send a notification (for all enabled services)'
def get_plugins(self):
        from sentry.plugins.bases.notify import NotificationPlugin
results = []
for plugin in plugins.for_project(self.project, version=1):
if not isinstance(plugin, NotificationPlugin):
continue
results.append(plugin)
for plugin in plugins.for_project(self.project, version=2):
            for notifier in (safe_execute(plugin.get_notifiers) or ()):
results.append(notifier)
return results
def after(self, event, state):
group = event.group
for plugin in self.get_plugins():
if not safe_execute(plugin.should_notify, group=group, event=event):
continue
metrics.incr('notifications.sent.{}'.format(plugin.slug))
yield self.future(plugin.rule_notify)
|
CCS-Tech/duck-blocks
|
db_create.py
|
Python
|
gpl-3.0
| 475
| 0.004211
|
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db
import os.path
db.create_all()
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
    api.create(SQLALCHEMY_MIGRATE_REPO, 'database_repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
|
api.version(SQLALCHEMY_MIGRATE_REPO))
|
sujitbehera27/MyRoboticsProjects-Arduino
|
src/resource/Python/examples/Adafruit16CServoDriver.py
|
Python
|
apache-2.0
| 529
| 0.009452
|
# The Adafruit16CServoDriver API is supported through Jython
servo1 = Runtime.createAndStart("servo1", "Servo")
pwm = Runtime.createAndStart("pwm", "Adafruit16CServoDriver")
pwm.connect("COM12")
# attach servo1 to pin 0 on the servo driver
pwm.attach(servo1, 0)
servo1.broadcastState()
servo1.moveTo(0)
sleep(1)
servo1.moveTo(90)
sleep(1)
servo1.moveTo(180)
sleep(1)
servo1.moveTo(90)
sleep(1)
servo1.moveTo(0)
sleep(1)
servo1.moveTo(90)
sleep(1)
servo1.moveTo(180)
sleep(1)
servo1.moveTo(90)
sleep(1)
servo1.moveTo(0)
|
a-tal/pyweet
|
pyweet/settings.py
|
Python
|
bsd-3-clause
| 259
| 0
|
"""Pywe
|
et runtime settings."""
import os
class Settings(object):
"""Basic settings object for pyweet."""
API = "rgIYSFIeGBxVXOPy22QzA"
API_SECRET = "VX7ohOHpJm1mXlGX6XS08JcT4Vp8j83QhRNo
|
1SVRevb"
AUTH_FILE = os.path.expanduser("~/.pyweet")
|
hholzgra/maposmatic
|
www/maposmatic/views.py
|
Python
|
agpl-3.0
| 26,128
| 0.004785
|
# coding: utf-8
# maposmatic, the web front-end of the MapOSMatic city map generation system
# Copyright (C) 2009 David Decotigny
# Copyright (C) 2009 Frédéric Lehobey
# Copyright (C) 2009 Pierre Mauduit
# Copyright (C) 2009 David Mentré
# Copyright (C) 2009 Maxime Petazzoni
# Copyright (C) 2009 Thomas Petazzoni
# Copyright (C) 2009 Gaël Utard
# Copyright (C) 2019 Hartmut Holzgraefe
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Views for MapOSMatic
import datetime
import logging
import json
import os
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.urls import reverse
from django.http import HttpResponseRedirect, HttpResponseBadRequest, HttpResponseNotFound, HttpResponse, Http404
from django.db.transaction import TransactionManagementError
from django.shortcuts import get_object_or_404, render_to_response, render
from django.template import RequestContext
from django.utils.translation import ugettext, ugettext_lazy as _
from django.core import serializers
from django.forms.models import model_to_dict
from django.core.exceptions import ValidationError
from django.urls import get_script_prefix
from django.db import connections
from django.utils.safestring import mark_safe
import ocitysmap
from www.maposmatic import helpers, forms, nominatim, models
import www.settings
import psycopg2
LOG = logging.getLogger('maposmatic')
def index(request):
"""The main page."""
form = forms.MapSearchForm(request.GET)
job_list = (models.MapRenderingJob.objects.all()
.order_by('-submission_time'))
job_list = (job_list.filter(status=0) |
job_list.filter(status=1))
return render(request,
'maposmatic/index.html',
{ 'form': form,
'queued': job_list.count()
}
)
def about(request):
"""The about page."""
form = forms.MapSearchForm(request.GET)
job_list = (models.MapRenderingJob.objects.all()
.order_by('-submission_time'))
job_list = (job_list.filter(status=0) |
job_list.filter(status=1))
return render(request,
'maposmatic/about.html',
{ 'form': form,
'queued': job_list.count()
}
)
def privacy(request):
"""The privacy statement page."""
return render(request,
'maposmatic/privacy.html',
{ }
)
def documentation_user_guide(request):
"""The user guide page."""
return render(request,
'maposmatic/documentation-user-guide.html',
{ }
)
def documentation_api(request):
"""The api documentation."""
return render(request,
'maposmatic/documentation-api.html',
{ }
)
def donate(request):
"""The donate page."""
form = forms.MapSearchForm(request.GET)
job_list = (models.MapRenderingJob.objects.all()
.order_by('-submission_time'))
job_list = (job_list.filter(status=0) |
job_list.filter(status=1))
return render(request,
'maposmatic/donate.html',
{ 'form': form,
'queued': job_list.count()
}
)
def donate_thanks(request):
"""The thanks for donation page."""
return render_to_response('maposmatic/donate-thanks.html')
def create_upload_file(job, file):
first_line = file.readline().decode("utf-8-sig")
LOG.info("firstline type %s" % type(first_line))
if first_line.startswith(u'<?xml'):
file_type = 'gpx'
else:
file_type = 'umap'
file_instance = models.UploadFile(uploaded_file = file, file_type = file_type)
file_instance.save()
file_instance.job.add(job)
def new(request):
"""The map creation page and form."""
papersize_buttons = ''
if request.method == 'POST':
form = forms.MapRenderingJobForm(request.POST, request.FILES)
if form.is_valid():
request.session['new_layout'] = form.cleaned_data.get('layout')
request.session['new_stylesheet'] = form.cleaned_data.get('stylesheet')
request.session['new_overlay'] = form.cleaned_data.get('overlay')
request.session['new_paper_width_mm'] = form.cleaned_data.get('paper_width_mm')
request.session['new_paper_height_mm'] = form.cleaned_data.get('paper_height_mm')
job = form.save(commit=False)
job.administrative_osmid = form.cleaned_data.get('administrative_osmid')
job.stylesheet = form.cleaned_data.get('stylesheet')
job.overlay = ",".join(form.cleaned_data.get('overlay'))
job.layout = form.cleaned_data.get('layout')
            job.paper_width_mm = form.cleaned_data.get('paper_width_mm')
job.paper_height_mm = form.cleaned_data.get('paper_height_mm')
job.status = 0 # Submitted
if www.settings.SUBMITTER_IP_LIFETIME != 0:
job.submitterip = request.META['REMOTE_ADDR']
else:
job.submitterip = None
job.submitteremail = form.cleaned_data.get('submitteremail')
            job.map_language = form.cleaned_data.get('map_language')
job.index_queue_at_submission = (models.MapRenderingJob.objects
.queue_size())
job.nonce = helpers.generate_nonce(models.MapRenderingJob.NONCE_SIZE)
job.save()
files = request.FILES.getlist('uploadfile')
for file in files:
create_upload_file(job, file)
return HttpResponseRedirect(reverse('map-by-id-and-nonce',
args=[job.id, job.nonce]))
else:
LOG.debug("FORM NOT VALID")
else:
init_vals = request.GET.dict()
oc = ocitysmap.OCitySMap(www.settings.OCITYSMAP_CFG_PATH)
if not 'layout' in init_vals and 'new_layout' in request.session :
init_vals['layout'] = request.session['new_layout']
else:
request.session['new_layout'] = oc.get_all_renderer_names()[0]
if not 'stylesheet' in init_vals and 'new_stylesheet' in request.session:
init_vals['stylesheet'] = request.session['new_stylesheet']
else:
request.session['new_stylesheet'] = oc.get_all_style_names()[0]
if not 'overlay' in init_vals and 'new_overlay' in request.session:
init_vals['overlay'] = request.session['new_overlay']
if not 'paper_width_mm' in init_vals and 'new_paper_width_mm' in request.session:
init_vals['paper_width_mm'] = request.session['new_paper_width_mm']
        if not 'paper_height_mm' in init_vals and 'new_paper_height_mm' in request.session:
init_vals['paper_height_mm'] = request.session['new_paper_height_mm']
form = forms.MapRenderingJobForm(initial=init_vals)
_ocitysmap = ocitysmap.OCitySMap(www.settings.OCITYSMAP_CFG_PATH)
# TODO: create tempates for these button lines ...
papersize_buttons += "<p><button id='paper_best_fit' type='button' class='btn btn-primary papersize papersize_best_fit' onclick='set_papersize(0,0);'><i class='fas fa-square fa-2x'></i></button> <b>Best fit</b> (<span id='best_width'>?</span>×<span id='best_height'>?</span>mm²)</p>"
for p in _ocitysmap.get_all_paper_sizes():
if p[1] is not None:
|
tedye/leetcode
|
Python/leetcode.156.binary-tree-upside-down.py
|
Python
|
mit
| 822
| 0.001217
|
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def upsideDownBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
if not root:
return None
leftbone = [root]
rightbone = []
temp = root
while temp.left:
leftbone.append(temp.left)
rightbone.append(self.upsideDownBinaryTree(temp.right))
temp = temp.left
leftbone[0].left = None
leftbone[0].right = None
for i in range(len(rightbone)):
leftbone[-i-1].left = rightbone[-i-1]
leftbone[-i-1].right = leftbone[-i-2]
return leftbone[-1]
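# Worked example (illustrative): for the tree
#
#         1                  4
#        / \                / \
#       2   3      ->      5   2
#      / \                    / \
#     4   5                  3   1
#
# the left spine [1, 2, 4] is collected in `leftbone`, each original right
# child becomes a left child, and the deepest left node (4) is returned as the
# new root.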
|
sjl767/woo
|
py/pre/toys.py
|
Python
|
gpl-2.0
| 3,537
| 0.035906
|
from minieigen import *
from woo.dem import *
import woo.core, woo.models
from math import *
import numpy
class PourFeliciter(woo.core.Preprocessor,woo.pyderived.PyWooObject):
'''Showcase for custom packing predicates, and importing surfaces from STL.'''
_classTraits=None
_PAT=woo.pyderived.PyAttrTrait # less typing
_attrTraits=[
]
def __init__(self,**kw):
woo.core.Preprocessor.__init__(self)
self.wooPyInit(self.__class__,woo.core.Preprocessor,**kw)
def __call__(self):
# preprocessor builds the simulation when called
pass
class NewtonsCradle(woo.core.Preprocessor,woo.pyderived.PyWooObject):
'''Showcase for custom packing predicates, and importing surfaces from STL.'''
_classTraits=None
_PAT=woo.pyderived.PyAttrTrait # less typing
_attrTraits=[
_PAT(int,'nSpheres',5,'Total number of spheres'),
_PAT(int,'nFall',1,'The number of spheres which are out of the equilibrium position at the beginning.'),
_PAT(float,'fallAngle',pi/4.,unit='deg',doc='Initial angle of falling spheres.'),
_PAT(float,'rad',.005,unit='m',doc='Radius of spheres'),
_PAT(Vector2,'cabHtWd',(.1,.1),unit='m',doc='Height and width of the suspension'),
_PAT(float,'cabRad',.0005,unit='m',doc='Radius of the suspending cables'),
_PAT(woo.models.ContactModelSelector,'model',woo.models.ContactModelSelector(name='Hertz',restitution=.99,numMat=(1,2),matDesc=['spheres','cables'],mats=[FrictMat(density=3e3,young=2e8),FrictMat(density=.001,young=2e8)]),doc='Select contact model. The first material is for spheres; the second, optional, material, is for the suspension cables.'),
_PAT(Vector3,'gravity',(0,0,-9.81),'Gravity acceleration'),
_PAT(int,'plotEvery',10,'How often to collect plot data'),
_PAT(float,'dtSafety',.7,':obj:`woo.core.Scene.dtSafety`')
]
def __init__(self,**kw):
woo.core.Preprocessor.__init__(self)
self.wooPyInit(self.__class__,woo.core.Preprocessor,**kw)
def __call__(self):
pre=self
S=woo.core.Scene(fields=[DemField(gravity=pre.gravity)],dtSafety=self.dtSafety)
S.pre=pre.deepcopy()
# preprocessor builds the simulation when called
xx=numpy.linspace(0,(pre.nSpheres-1)*2*pre.rad,num=pre.nSpheres)
mat=pre.model.mats[0]
cabMat=(pre.model.mats[1] if len(pre.model.mats)>1 else mat)
ht=pre.cabHtWd[0]
for i,x in enumerate(xx):
color=min(.999,(x/xx[-1]))
s=Sphere.make((x,0,0) if i>=pre.nFall else (x-ht*sin(pre.fallAngle),0,ht-ht*cos(pre.fallAngle)),radius=pre.rad,mat=mat,color=color)
n=s.shape.nodes[0]
S.dem.par.add(s)
# sphere's node is integrated
S.dem.nodesAppend(n)
for p in [Vector3(x,-pre.cabHtWd[1]/2,pre.cabHtWd[0]),Vector3(x,pre.cabHtWd[1]/2,pre.cabHtWd[0])]:
t=Truss.make([n,p],radius=pre.cabRad,wire=False,color=color,mat=cabMat,fixed=None)
t.shape.nodes[1].blocked='xyzXYZ'
S.dem.par.add(t)
S.engines=DemField.minimalEngines(model=pre.model,dynDtPeriod=20)+[
            IntraForce([In2_Truss_ElastMat()]),
            woo.core.PyRunner(self.plotEvery,'S.plot.addData(i=S.step,t=S.time,total=S.energy.total(),relErr=(S.energy.relErr() if S.step>1000 else 0),**S.energy)'),
]
S.lab.dynDt.maxRelInc=1e-6
S.trackEnergy=True
S.plot.plots={'i':('total','**S.energy')}
return S
|
Donkyhotay/MoonPy
|
zope/app/dtmlpage/interfaces.py
|
Python
|
gpl-3.0
| 1,715
| 0.000583
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""DTML Page content component int
|
erfaces
$Id: interfaces.py 39064 2005-10-11 18:40:10Z philikon $
"""
__docformat__ = 'restructuredtext'
import zope.schema
from zope.interface import Interface, Attribute
from zope.app.i18n import ZopeMessageFactory as _
class IDTMLPage(Interface):
"""DTML Pages are a persistent implementation of DTML."""
def setSource(text, content_type='text/html'):
"""Save the source of the page template."""
def getSource():
|
"""Get the source of the page template."""
source = zope.schema.Text(
title=_(u"Source"),
description=_(u"""The source of the dtml page."""),
required=True)
class IRenderDTMLPage(Interface):
content_type = Attribute('Content type of generated output')
def render(request, *args, **kw):
"""Render the page template.
The first argument is bound to the top-level `request`
variable. The positional arguments are bound to the `args`
variable and the keyword arguments are bound to the `kw`
variable.
"""
|
spillai/procgraph
|
src/procgraph_pil/pil_operations.py
|
Python
|
lgpl-3.0
| 1,746
| 0.006873
|
import numpy as np
from procgraph import COMPULSORY
from procgraph import simple_block
from .pil_conversions import Image_from_array
__all__ = ['resize']
@simple_block
def pil_zoom(value, factor=COMPULSORY):
""" Zooms by a given factor """
# TODO: RGBA?
shape = value.shape[:2]
shape2 = (int(factor * shape[0]), int(factor * shape[1]))
height, width = shape2
image = Image_from_array(value)
image = image.resize((width, height))
|
result = np.asarray(image.convert("RGB"))
return result
@simple_block
def resize(value, width=None, height=None):
'''
Resizes an image.
You should pass at least one of ``width`` or ``height``.
:param value: The image to resize.
:type value: image
:param width: Target image width.
:type width: int,>0
:param height: Target image height.
:type height: int,>0
:return: image: The image as a numpy array.
:rtype: rgb
'''
H, W = value.shape[:2]
if width is None and height is None:
msg = 'You should pass at least one of width or height.'
raise ValueError(msg)
if width is None and height is not None:
        width = (height * W) / H
    elif height is None and width is not None:
        height = (width * H) / W
if width == W and height == H:
# print('want: %s have: %s -- No resize necessary' % (value.shape, (width, height)))
return value.copy()
image = Image_from_array(value)
# TODO: RGBA?
image = image.resize((width, height))
result = np.asarray(image.convert("RGB"))
assert result.shape[0] == height
assert result.shape[1] == width
return result
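# A small usage sketch (the input array below is synthetic; inside a ProcGraph
# model the block receives its image from the connected signal):
#
#     frame = np.zeros((120, 160, 3), dtype='uint8')
#     small = resize(frame, width=80)   # height follows the aspect ratio -> 60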
|
altair-viz/altair
|
altair/examples/scatter_with_loess.py
|
Python
|
bsd-3-clause
| 755
| 0.005298
|
"""
Scatter Plot with LOESS Lines
-----------------------------
This example shows how to add a trend line to a scatter plot using
the LOESS transform (LOcally Estimated Scatterplot Smoothing).
"""
# category: scatter plots
import altair as alt
import pandas as pd
import numpy as np
|
np.random.seed(1)
source = pd.DataFrame({
'x': np.arange(100),
'A': np.random.randn(100).cumsum(),
'B': np.random.randn(100).cumsum(),
'C': np.random.randn(100).cumsum(),
})
base = alt.Chart(source).mark_circle(opacity=0.5).transform_fold(
fold=['A', 'B', 'C'],
as_=['category', 'y']
).encode(
alt.X('x:Q'),
alt.Y('y:Q'),
    alt.Color('category:N')
)
base + base.transform_loess('x', 'y', groupby=['category']).mark_line(size=4)
|
grengojbo/satchmo
|
satchmo/apps/satchmo_utils/thumbnail/utils.py
|
Python
|
bsd-3-clause
| 9,086
| 0.005393
|
from django.conf import settings
from django.core.cache import get_cache
from django.db.models.fields.files import ImageField
from livesettings import config_value
from satchmo_utils.thumbnail.text import URLify
#ensure config is loaded
import satchmo_utils.thumbnail.config
import fnmatch
import logging
import os
import shutil
import urlparse
try:
import Image
except ImportError:
from PIL import Image
log = logging.getLogger('satchmo_utils.thumbnail')
image_cache = get_cache('locmem:///')
_FILE_CACHE_TIMEOUT = 60 * 60 * 24 * 31 # 1 month
_THUMBNAIL_GLOB = '%s_t*%s'
def _get_thumbnail_path(path, width=None, height=None):
""" create thumbnail path from path and required width and/or height.
thumbnail file name is constructed like this:
<basename>_t_[w<width>][_h<height>].<extension>
"""
# one of width/height is required
assert (width is not None) or (height is not None)
basedir = os.path.dirname(path) + '/'
base, ext = os.path.splitext(os.path.basename(path))
# make thumbnail filename
th_name = base + '_t'
if (width is not None) and (height is not None):
th_name += '_w%d_h%d' % (width, height)
elif width is not None:
th_name += '%d' % width # for compatibility with admin
elif height is not None:
th_name += '_h%d' % height
th_name += ext
return urlparse.urljoin(basedir, th_name)
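# Worked examples of the naming scheme above (paths are illustrative only):
#
#     _get_thumbnail_path('img/photo.jpg', width=120)
#         -> 'img/photo_t120.jpg'            (width-only, admin-compatible form)
#     _get_thumbnail_path('img/photo.jpg', width=120, height=80)
#         -> 'img/photo_t_w120_h80.jpg'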
def _get_path_from_url(url, root=settings.MEDIA_ROOT, url_root=settings.MEDIA_URL):
""" make filesystem path from url """
# if url.startswith('/'):
# return url
if url.startswith(url_root):
url = url[len(url_root):] # strip media root url
return os.path.normpath(os.path.join(root, url))
def _get_url_from_path(path, root=settings.MEDIA_ROOT, url_root=settings.MEDIA_URL):
""" make url from filesystem path """
if path.startswith(root):
path = path[len(root):] # strip media root
    return urlparse.urljoin(url_root, path.replace('\\', '/'))
def _has_thumbnail(photo_url, width=None, height=None, root=settings.MEDIA_ROOT, url_root=settings.MEDIA_URL):
# one of width/height is required
assert (width is not None) or (height is not None)
return os.path.isfile(_get_path_from_url(_get_thumbnail_path(photo_url, width, height), root, url_root))
def make_thumbnail(photo_url, width=None, height=None, root=settings.MEDIA_ROOT, url_root=settings.MEDIA_URL):
""" create thumbnail """
# one of width/height is required
assert (width is not None) or (height is not None)
if not photo_url: return None
th_url = _get_thumbnail_path(photo_url, width, height)
th_path = _get_path_from_url(th_url, root, url_root)
photo_path = _get_path_from_url(photo_url, root, url_root)
if _has_thumbnail(photo_url, width, height, root, url_root):
# thumbnail already exists
if not (os.path.getmtime(photo_path) > os.path.getmtime(th_path)):
# if photo mtime is newer than thumbnail recreate thumbnail
return th_url
# make thumbnail
# get original image size
orig_w, orig_h = get_image_size(photo_url, root, url_root)
if (orig_w is None) and (orig_h) is None:
# something is wrong with image
return photo_url
# make proper size
if (width is not None) and (height is not None):
if (orig_w == width) and (orig_h == height):
# same dimensions
return None
size = (width, height)
elif width is not None:
if orig_w == width:
# same dimensions
return None
size = (width, orig_h)
elif height is not None:
if orig_h == height:
# same dimensions
return None
size = (orig_w, height)
try:
img = Image.open(photo_path).copy()
img.thumbnail(size, Image.ANTIALIAS)
img.save(th_path, quality=config_value('THUMBNAIL', 'IMAGE_QUALITY'))
except Exception, err:
# this goes to webserver error log
import sys
print >>sys.stderr, '[MAKE THUMBNAIL] error %s for file %r' % (err, photo_url)
return photo_url
return th_url
def remove_file_thumbnails(file_name_path):
if not file_name_path: return # empty path
import fnmatch, os
base, ext = os.path.splitext(os.path.basename(file_name_path))
basedir = os.path.dirname(file_name_path)
for file in fnmatch.filter(os.listdir(basedir), _THUMBNAIL_GLOB % (base, ext)):
path = os.path.join(basedir, file)
try:
os.remove(path)
except OSError:
# no reason to crash due to bad paths.
log.warn("Could not delete image thumbnail: %s", path)
image_cache.delete(path) # delete from cache
def make_admin_thumbnail(url):
""" make thumbnails for admin interface """
return make_thumbnail(url, width=120)
def make_admin_thumbnails(model):
""" create thumbnails for admin interface for all ImageFields (and subclasses) in the model """
for obj in model._meta.fields:
if isinstance(obj, ImageField):
url = getattr(model, obj.name).path
make_thumbnail(url, width=120)
def _get_thumbnail_url(photo_url, width=None, height=None, root=settings.MEDIA_ROOT, url_root=settings.MEDIA_URL):
""" return thumbnail URL for requested photo_url and required width and/or height
if thumbnail file do not exists returns original URL
"""
# one of width/height is required
assert (width is not None) or (height is not None)
if _has_thumbnail(photo_url, width, height, root, url_root):
return _get_thumbnail_path(photo_url, width, height)
else:
return photo_url
def _set_cached_file(path, value):
""" Store file dependent data in cache.
Timeout is set to _FILE_CACHE_TIMEOUT (1month).
"""
mtime = os.path.getmtime(path)
image_cache.set(path, (mtime, value,), _FILE_CACHE_TIMEOUT)
def _get_cached_file(path, default=None):
"""
|
Get file content from cache.
    If the modification time differs, return None and delete
    the data from the cache.
"""
cached = image_cache.get(path, default)
if cached is None:
return None
mtime, value = cached
if (not os.path.isfile(path)) or (os.path.getmtime(path) != mtime): # file is changed or deleted
        image_cache.delete(path) # delete from cache
# remove thumbnails if exists
base, ext = os.path.splitext(os.path.basename(path))
basedir = os.path.dirname(path)
for file in fnmatch.filter(os.listdir(basedir), _THUMBNAIL_GLOB % (base, ext)):
os.remove(os.path.join(basedir, file))
return None
else:
return value
def get_image_size(photo_url, root=settings.MEDIA_ROOT, url_root=settings.MEDIA_URL):
""" returns image size.
image sizes are cached (using separate locmem:/// cache instance)
"""
path = _get_path_from_url(photo_url, root, url_root)
size = _get_cached_file(path)
if size is None:
try:
size = Image.open(path).size
except Exception, err:
# this goes to webserver error log
import sys
print >>sys.stderr, '[GET IMAGE SIZE] error %s for file %r' % (err, photo_url)
return None, None
if size is not None:
_set_cached_file(path, size)
else:
return None, None
return size
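# Illustrative call (the URL is made up; the size is computed once and then
# served from the locmem cache until the file's mtime changes):
#
#     width, height = get_image_size('/media/products/photo.jpg')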
##################################################
## FILE HELPERS ##
def _rename(old_name, new_name):
""" rename image old_name -> name """
try:
shutil.move(os.path.join(settings.MEDIA_ROOT, old_name), os.path.join(settings.MEDIA_ROOT, new_name))
return new_name
except (IOError, shutil.Error):
return old_name
# BJK Note: I think this might be the way to approach it
# def rename_by_field(field, req_name, add_path=None):
# """Rename the file in filefield `field`"""
# if not (field and field.content):
# return field
def rename_by_field(file_path, req_name, add_path=None):
clean_path = lambda p: os.path.normpath(os.path.normcase(p))
if file_path.strip() == '
|
blindman/nhl-logo-scraper
|
tests/test_cli.py
|
Python
|
mit
| 604
| 0.004967
|
"""Tests for the main nhlscraper CLI module"""
from subprocess import PIPE, getoutput
from unittest import TestCase
from nhl_logo_scraper import __version__ as VERSION
class TestHelp(TestCase):
def test_returns_usage_information(self):
output = getoutput("nhlscraper -h")
self.assertTrue('Usage:' in output)
output = getoutput("nhlscraper --help")
self.assertTrue('Usage:' in output)
class TestVersion(TestCase):
def test_returns_version_information(self):
output = getoutput('nhlscraper --version')
        self.assertEqual(output.strip(), VERSION)
|
tersmitten/ansible
|
lib/ansible/modules/cloud/azure/azure_rm_functionapp_facts.py
|
Python
|
gpl-3.0
| 6,027
| 0.002323
|
#!/usr/bin/python
#
# Copyright (c) 2016 Thomas Stringer, <tomstr@microsoft.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_functionapp_facts
version_added: "2.4"
short_description: Get Azure Function App facts
description:
- Get facts for one Azure Function App or all Function Apps within a resource group
options:
name:
description:
- Only show results for a specific Function App
resource_group:
description:
- Limit results to a resource group. Required when filtering by name
aliases:
- resource_group_name
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Thomas Stringer (@trstringer)"
'''
EXAMPLES = '''
- name: Get facts for one Function App
azure_rm_functionapp_facts:
resource_group: myResourceGroup
name: myfunctionapp
- name: Get facts for all Function Apps in a resource group
azure_rm_functionapp_facts:
resource_group: myResourceGroup
- name: Get facts for all Function Apps by tags
azure_rm_functionapp_facts:
tags:
- testing
'''
RETURN = '''
azure_functionapps:
description: List of Azure Function Apps dicts
returned: always
type: list
example:
id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/sites/myfunctionapp
name: myfunctionapp
kind: functionapp
location: East US
type: Microsoft.Web/sites
state: Running
host_names:
- myfunctionapp.azurewebsites.net
repository_site_name: myfunctionapp
usage_state: Normal
enabled: true
enabled_host_names:
- myfunctionapp.azurewebsites.net
- myfunctionapp.scm.azurewebsites.net
        availability_state: Normal
host_name_ssl_states:
|
- name: myfunctionapp.azurewebsites.net
ssl_state: Disabled
host_type: Standard
- name: myfunctionapp.scm.azurewebsites.net
ssl_state: Disabled
host_type: Repository
server_farm_id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/serverfarms/EastUSPlan
reserved: false
last_modified_time_utc: 2017-08-22T18:54:01.190Z
scm_site_also_stopped: false
client_affinity_enabled: true
client_cert_enabled: false
host_names_disabled: false
outbound_ip_addresses: ............
container_size: 1536
daily_memory_time_quota: 0
resource_group: myResourceGroup
default_host_name: myfunctionapp.azurewebsites.net
'''
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
class AzureRMFunctionAppFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
resource_group=dict(type='str', aliases=['resource_group_name']),
tags=dict(type='list'),
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_functionapps=[])
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMFunctionAppFacts, self).__init__(
self.module_arg_spec,
supports_tags=False,
facts_module=True
)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name and not self.resource_group:
self.fail("Parameter error: resource group required when filtering by name.")
if self.name:
self.results['ansible_facts']['azure_functionapps'] = self.get_functionapp()
elif self.resource_group:
self.results['ansible_facts']['azure_functionapps'] = self.list_resource_group()
else:
self.results['ansible_facts']['azure_functionapps'] = self.list_all()
return self.results
def get_functionapp(self):
self.log('Get properties for Function App {0}'.format(self.name))
function_app = None
result = []
try:
function_app = self.web_client.web_apps.get(
self.resource_group,
self.name
)
except CloudError:
pass
if function_app and self.has_tags(function_app.tags, self.tags):
result = function_app.as_dict()
return [result]
def list_resource_group(self):
self.log('List items')
try:
response = self.web_client.web_apps.list_by_resource_group(self.resource_group)
except Exception as exc:
self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(item.as_dict())
return results
def list_all(self):
self.log('List all items')
try:
            response = self.web_client.web_apps.list()
except Exception as exc:
self.fail("Error listing all items - {0}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(item.as_dict())
return results
def main():
AzureRMFunctionAppFacts()
if __name__ == '__main__':
main()
|
espressopp/espressopp
|
src/Tensor.py
|
Python
|
gpl-3.0
| 3,339
| 0.000299
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*****************
espressopp.Tensor
*****************
"""
from _espressopp import Tensor
__all__ = ['Tensor', 'toTensorFromVector', 'toTensor']
def extend_class():
# This injects additional methods into the Tensor class and pulls it
# into this module
origin_init = Tensor.__init__
def init(self, *args):
if len(args) == 0:
            xx = yy = zz = xy = xz = yz = 0.0
elif len(args) == 1:
arg0 = args[0]
if isinstance(arg0, Tensor):
xx = arg0.xx
yy = arg0.yy
zz = arg0.zz
xy = arg0.xy
xz = arg0.xz
yz = arg0.yz
# test whether the argument is iterable and has 3 elements
elif hasattr(arg0, '__iter__') and len(arg0) == 6:
xx, yy, zz, xy, xz, yz = arg0
elif isinstance(arg0, float) or isinstance(arg0, int):
xx = yy = zz = xy = xz = yz = arg0
else:
raise TypeError("Cannot initialize Tensor from %s" % (args))
elif len(args) == 6:
xx, yy, zz, xy, xz, yz = args
else:
raise TypeError("Cannot initialize Tensor from %s" % (args))
origin_init(self, xx, yy, zz, xy, xz, yz)
def _get_getter_setter(idx):
def _get(self):
return self[idx]
def _set(self, v):
self[idx] = v
return _get, _set
Tensor.__init__ = init
Tensor.xx = property(*_get_getter_setter(0))
Tensor.yy = property(*_get_getter_setter(1))
    Tensor.zz = property(*_get_getter_setter(2))
    Tensor.xy = property(*_get_getter_setter(3))
    Tensor.xz = property(*_get_getter_setter(4))
    Tensor.yz = property(*_get_getter_setter(5))
Tensor.__str__ = lambda self: str((self[0], self[1], self[2], self[3], self[4], self[5]))
Tensor.__repr__ = lambda self: 'Tensor' + str(self)
extend_class()
def toTensorFromVector(*args):
"""Try to convert the arguments to a Tensor.
    This function will only convert to a Tensor if x, y and z are
specified."""
if len(args) == 1:
arg0 = args[0]
if isinstance(arg0, Tensor):
return arg0
elif hasattr(arg0, '__iter__') and len(arg0) == 3:
return Tensor(*args)
elif len(args) == 3:
return Tensor(*args)
|
raise TypeError("Specify x, y and z.")
def toTensor(*args):
"""Try to convert the arguments to a Tensor, returns the argument,
if it is already a Tensor."""
if len(args) == 1 and isinstance(args[0], Tensor):
return args[0]
else:
return Tensor(*args)
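# Construction forms accepted by the patched __init__ above (illustrative):
#
#     Tensor()                            # all six components set to 0.0
#     Tensor(2.5)                         # all six components set to 2.5
#     Tensor([1., 2., 3., 4., 5., 6.])    # any length-6 iterable
#     Tensor(1., 2., 3., 4., 5., 6.)      # xx, yy, zz, xy, xz, yz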
|
CellModels/tyssue
|
tests/utils/test_connectivity.py
|
Python
|
gpl-2.0
| 2,530
| 0
|
import numpy as np
from tyssue import Sheet, Monolayer
from tyssue.generation import three_faces_sheet, extrude
from tyssue.utils import connectivity
from tyssue.config.geometry import bulk_spec
def test_ef_connect():
data, specs = three_faces_sheet()
sheet = Sheet("test", data, specs)
ef_connect = connectivity.edge_in_face_connectivity(sheet)
idx = sheet.edge_df.query(f"face == {sheet.Nf-1}").index
assert ef_connect[idx[0], idx[1]]
def test_face_face_connectivity():
|
data, specs = three_faces_sheet()
sheet = Sheet("test", data, specs)
    ffc = connectivity.face_face_connectivity(sheet, exclude_opposites=False)
expected = np.array([[0, 2, 2], [2, 0, 2], [2, 2, 0]])
np.testing.assert_array_equal(ffc, expected)
ffc = connectivity.face_face_connectivity(sheet, exclude_opposites=True)
expected = np.array([[0, 2, 2], [2, 0, 2], [2, 2, 0]])
np.testing.assert_array_equal(ffc, expected)
mono = Monolayer("test", extrude(data), bulk_spec())
ffc = connectivity.face_face_connectivity(mono, exclude_opposites=False)
assert ffc[0][ffc[0] == 2].shape == (10,)
assert ffc[0][ffc[0] == 1].shape == (4,)
assert ffc.max() == 4
ffc = connectivity.face_face_connectivity(mono, exclude_opposites=True)
assert ffc[0][ffc[0] == 2].shape == (10,)
assert ffc[0][ffc[0] == 1].shape == (4,)
assert ffc.max() == 2
def test_cell_cell_connectivity():
data, _ = three_faces_sheet()
mono = Monolayer("test", extrude(data), bulk_spec())
ccc = connectivity.cell_cell_connectivity(mono)
expected = np.array([[0, 36, 36], [36, 0, 36], [36, 36, 0]])
np.testing.assert_array_equal(ccc, expected)
def test_srce_trgt_connectivity():
data, specs = three_faces_sheet()
sheet = Sheet("test", data, specs)
stc = connectivity.srce_trgt_connectivity(sheet)
expected = np.array([3, 2, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1])
np.testing.assert_array_equal(stc.sum(axis=0), expected)
def test_verts_in_face_connectivity():
data, specs = three_faces_sheet()
sheet = Sheet("test", data, specs)
vfc = connectivity.verts_in_face_connectivity(sheet)
assert vfc[0][vfc[0] == 2].shape == (3,)
def test_verts_in_cell_connectivity():
data, specs = three_faces_sheet()
mono = Monolayer("test", extrude(data), bulk_spec())
ccc = connectivity.verts_in_cell_connectivity(mono)
assert ccc[0][ccc[0] == 9].shape == (18,)
assert ccc[0][ccc[0] == 18].shape == (6,)
assert ccc[0][ccc[0] == 27].shape == (1,)
|
p4lang/behavioral-model
|
mininet/1sw_demo.py
|
Python
|
apache-2.0
| 3,488
| 0.008601
|
#!/usr/bin/env python3
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from p4_mininet import P4Switch, P4Host
import argparse
from time import sleep
parser = argparse.ArgumentParser(description='Mininet demo')
parser.add_argument('--behavioral-exe', help='Path to behavioral executable',
type=str, action="store", required=True)
parser.add_argument('--thrift-port', help='Thrift server port for table updates',
type=int, action="store", default=9090)
parser.add_argument('--num-hosts', help='Number of hosts to connect to switch',
type=int, action="store", default=2)
parser.add_argument('--mode', choices=['l2', 'l3'], type=str, default='l3')
parser.add_argument('--json', help='Path to JSON config file',
type=str, action="store", required=True)
parser.add_argument('--pcap-dump', help='Dump packets on interfaces to pcap files',
type=str, action="store", required=False, default=False)
args = parser.parse_args()
class SingleSwitchTopo(Topo):
"Single switch connected to n (< 256) hosts."
def __init__(self, sw_path, json_path, thrift_port, pcap_dump, n, **opts):
# Initialize topology and default options
Topo.__init__(self, **opts)
switch = self.addSwitch('s1',
sw_path = sw_path,
json_path = json_path,
thrift_port = thrift_port,
pcap_dump = pcap_dump)
for h in range(n):
host = self.addHost('h%d' % (h + 1),
ip = "10.0.%d.10/24" % h,
mac = '00:04:00:00:00:%02x' %h)
self.addLink(host, switch)
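# With the loop above, h1 is addressed 10.0.0.10/24 with MAC 00:04:00:00:00:00,
# h2 gets 10.0.1.10/24 and 00:04:00:00:00:01, and so on: each host sits in its
# own /24 behind the single switch (illustrative summary of the scheme above).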
def main():
num_hosts = args.num_hosts
mode = args.mode
topo = SingleSwitchTopo(args.behavioral_exe,
args.json,
args.thrift_port,
args.pcap_dump,
num_hosts)
net = Mininet(topo = topo,
host = P4Host,
switch = P4Switch,
controller = None)
net.start()
sw_mac = ["00:aa:bb:00:00:%02x" % n for n in range(num_hosts)]
    sw_addr = ["10.0.%d.1" % n for n in range(num_hosts)]
for n in range(num_hosts):
h = net.get('h%d' % (n + 1))
if mode == "l2":
h.setDefaultRoute("dev eth0")
else:
h.setARP(sw_addr[n], sw_mac[n])
h.setDefaultRoute("dev eth0 via %s" % sw_addr[n])
for n in range(num_hosts):
h = net.get('h%d' % (n + 1))
h.describe()
sleep(1)
print("Ready !")
CLI( net )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
main()
|
combatopera/pym2149
|
ymtests/__init__.py
|
Python
|
gpl-3.0
| 707
| 0.001414
|
# Copyright 2014, 2018, 2019, 2020 Andrzej Cichocki
# This file is part of pym2149.
#
# pym2149 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pym2149 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pym2149. If not, see <http://www.gnu.org/licenses/>.
|
saurabh6790/erpnext
|
erpnext/patches/v4_0/create_custom_fields_for_india_specific_fields.py
|
Python
|
gpl-3.0
| 1,704
| 0.032277
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.custom.doctype.custom_field.custom_field import create_custom_field_if_values_exist
|
def execute():
frappe.reload_doc("stock", "doctype", "purchase_receipt")
frappe.reload_doc("hr", "doctype", "employee")
frappe.reload_doc("Payroll", "doctype", "salary_slip")
india_specific_fields = {
"Purchase Receipt": [{
"label": "Supplier Shipment No",
"fieldname": "challan_no",
|
"fieldtype": "Data",
"insert_after": "is_subcontracted"
}, {
"label": "Supplier Shipment Date",
"fieldname": "challan_date",
"fieldtype": "Date",
"insert_after": "is_subcontracted"
}],
"Employee": [{
"label": "PAN Number",
"fieldname": "pan_number",
"fieldtype": "Data",
"insert_after": "company_email"
}, {
"label": "Gratuity LIC Id",
"fieldname": "gratuity_lic_id",
"fieldtype": "Data",
"insert_after": "company_email"
}, {
"label": "Esic Card No",
"fieldname": "esic_card_no",
"fieldtype": "Data",
"insert_after": "bank_ac_no"
}, {
"label": "PF Number",
"fieldname": "pf_number",
"fieldtype": "Data",
"insert_after": "bank_ac_no"
}],
"Salary Slip": [{
"label": "Esic No",
"fieldname": "esic_no",
"fieldtype": "Data",
"insert_after": "letter_head",
"permlevel": 1
}, {
"label": "PF Number",
"fieldname": "pf_no",
"fieldtype": "Data",
"insert_after": "letter_head",
"permlevel": 1
}]
}
for dt, docfields in india_specific_fields.items():
for df in docfields:
create_custom_field_if_values_exist(dt, df)
|
dstahlke/qitensor
|
qitensor/experimental/__init__.py
|
Python
|
bsd-2-clause
| 84
| 0
|
from . import cartan_decompose
from . import stabilizers
from . import noncommgraph
|
minlexx/pyevemon
|
esi_client/models/get_corporations_corporation_id_structures_service.py
|
Python
|
gpl-3.0
| 4,135
| 0.001693
|
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetCorporationsCorporationIdStructuresService(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None, state=None):
"""
GetCorporationsCorporationIdStructuresService - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str',
'state': 'str'
}
self.attribute_map = {
'name': 'name',
'state': 'state'
}
self._name = name
self._state = state
@property
def name(self):
"""
Gets the name of this GetCorporationsCorporationIdStructuresService.
name string
:return: The name of this GetCorporationsCorporationIdStructuresService.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this GetCorporationsCorporationIdStructuresService.
name string
:param name: The name of this GetCorporationsCorporationIdStructuresService.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def state(self):
"""
Gets the state of this GetCorporationsCorporationIdStructuresService.
state string
:return: The state of this GetCorporationsCorporationIdStructuresService.
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""
Sets the state of this GetCorporationsCorporationIdStructuresService.
state string
:param state: The state of this GetCorporationsCorporationIdStructuresService.
:type: str
"""
allowed_values = ["online", "offline", "cleanup"]
if state not in allowed_values:
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}"
.format(state, allowed_values)
)
self._state = state
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
|
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
        if not isinstance(other, GetCorporationsCorporationIdStructuresService):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
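
# --- Editor's addition: a minimal usage sketch (illustrative only, not part of
# the generated model; the service name below is made up) ---
if __name__ == '__main__':
    service = GetCorporationsCorporationIdStructuresService(name='Clone Bay')
    service.state = 'online'           # must be one of "online", "offline", "cleanup"
    print(service.to_dict())           # -> {'name': 'Clone Bay', 'state': 'online'}
    try:
        service.state = 'rebooting'    # rejected by the setter's allowed_values check
    except ValueError as exc:
        print(exc)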
|
buchuki/programming_lab
|
programming_lab/classlist/forms.py
|
Python
|
gpl-3.0
| 1,349
| 0.008154
|
# This file is part of Virtual Programming Lab.
#
# Virtual Programming Lab is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Virtual Programming Lab is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Virtual Programming Lab. If not, see <http://www.gnu.org/licenses/>.
from django import forms
from classlist.models import ClassList, ClassRequest
class RequestClassForm(forms.Form):
classlist = forms.ModelChoiceField(label="Class",
queryset=ClassList.objects.all(), empty_label=None,
widget=forms.RadioSelect)
class ApproveRequestForm(forms.Form):
requests = forms.ModelMultipleChoiceField(
queryset = ClassRequest.objects.all(),
        widget=forms.CheckboxSelectMultiple)
|
def __init__(self, queryset, *args, **kwargs):
super(ApproveRequestForm, self).__init__(*args, **kwargs)
self.fields['requests'].queryset = queryset
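
# --- Editor's addition: an illustrative sketch of how a view might use these
# forms; the queryset and view flow below are assumptions, not part of the
# original module ---
#
#     pending = ClassRequest.objects.all()
#     form = ApproveRequestForm(pending, request.POST or None)
#     if form.is_valid():
#         for class_request in form.cleaned_data['requests']:
#             ...  # approve the selected request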
|
theflofly/tensorflow
|
tensorflow/python/saved_model/function_deserialization.py
|
Python
|
apache-2.0
| 14,043
| 0.008047
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for deserializing `Function`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.core.framework import function_pb2
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as function_lib
from tensorflow.python.framework import func_graph as func_graph_lib
from tensorflow.python.framework import function_def_to_graph as function_def_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _is_tensor(t):
return isinstance(t, (ops.Tensor, resource_variable_ops.ResourceVariable))
def _call_concrete_function(function, inputs):
"""Calls a restored Function with structured inputs.
This differs from `function.__call__` in that inputs and outputs are
structured and that it casts inputs to tensors if needed.
  Note: this does not check that non-tensor inputs match. That should be
done before via `_concrete_function_callable_with`.
Args:
function: ConcreteFunction to call.
inputs: Structured inputs compatible with
`function.graph.structured_input_signature`.
Returns:
The structured function output.
"""
expected_structure = function.graph.structured_input_signature
flatten_inputs = nest.flatten_up_to(expected_structure, inputs)
tensor_inputs = []
for arg, expected in zip(flatten_inputs, nest.flatten(expected_structure)):
if isinstance(expected, tensor_spec.TensorSpec):
tensor_inputs.append(
ops.convert_to_tensor(arg, dtype_hint=expected.dtype))
result = function._call_flat(tensor_inputs) # pylint: disable=protected-access
if isinstance(result, ops.Operation):
return None
return result
def _try_convert_to_tensor_spec(arg, dtype_hint):
"""Returns None or TensorSpec obtained if `arg` is converted to tensor."""
try:
    # Note: try conversion in a FuncGraph to avoid polluting current context.
with func_graph_lib.FuncGraph(name="guess_conversion").as_default():
result = ops.convert_to_tensor(arg, dtype_hint=dtype_hint)
return tensor_spec.TensorSpec(shape=result.shape, dtype=result.dtype)
except (TypeError, ValueError):
return None
def _concrete_function_callable_with(function, inputs, allow_conversion):
"""Returns whether concrete `function` can be called with `inputs`."""
expected_structure = function.graph.structured_input_signature
try:
flatten_inputs = nest.flatten_up_to(expected_structure, inputs)
except (TypeError, ValueError):
return False
for arg, expected in zip(flatten_inputs, nest.flatten(expected_structure)):
if isinstance(expected, tensor_spec.TensorSpec):
if allow_conversion:
arg = _try_convert_to_tensor_spec(arg, dtype_hint=expected.dtype)
if not _is_tensor(arg) and not isinstance(arg, tensor_spec.TensorSpec):
return False
if arg.dtype != expected.dtype:
return False
if not expected.shape.is_compatible_with(arg.shape):
return False
else:
if arg != expected:
return False
return True
def _deserialize_function_spec(function_spec_proto, coder):
"""Deserialize a FunctionSpec object from its proto representation."""
typeless_fullargspec = coder.decode_proto(function_spec_proto.fullargspec)
fullargspec = tf_inspect.FullArgSpec(
args=typeless_fullargspec.args,
varargs=typeless_fullargspec.varargs,
varkw=typeless_fullargspec.varkw,
defaults=typeless_fullargspec.defaults,
kwonlyargs=typeless_fullargspec.kwonlyargs,
kwonlydefaults=typeless_fullargspec.kwonlydefaults,
annotations=typeless_fullargspec.annotations)
is_method = function_spec_proto.is_method
args_to_prepend = coder.decode_proto(function_spec_proto.args_to_prepend)
kwargs_to_include = coder.decode_proto(function_spec_proto.kwargs_to_include)
input_signature = coder.decode_proto(function_spec_proto.input_signature)
return function_lib.FunctionSpec(fullargspec, is_method, args_to_prepend,
kwargs_to_include, input_signature)
# TODO(allenl): The fact that we can't derive ConcreteFunction calling
# conventions from the serialized input spec right now is unfortunate. Merging
# these would be good, maybe by adding TensorSpec names to cache keys so renamed
# keyword arguments would yield different ConcreteFunctions.
def setup_bare_concrete_function(saved_bare_concrete_function,
concrete_functions):
"""Makes a restored bare concrete function callable."""
# Bare concrete functions accept only flat lists of Tensors with unique
# names.
concrete_function = concrete_functions[
saved_bare_concrete_function.concrete_function_name]
# pylint: disable=protected-access
concrete_function._arg_keywords = (
saved_bare_concrete_function.argument_keywords)
concrete_function._num_positional_args = (
saved_bare_concrete_function.allowed_positional_arguments)
# pylint: enable=protected-access
concrete_function.add_to_graph()
return concrete_function
class RestoredFunction(def_function.Function):
"""Wrapper class for a function that has been restored from saved state.
See `def_function.Function`.
"""
def __init__(self, python_function, name, function_spec, concrete_functions):
# TODO(mdan): We may enable autograph once exceptions are supported.
super(RestoredFunction, self).__init__(
python_function, name, autograph=False)
self._concrete_functions = concrete_functions
self._function_spec = function_spec
def _list_all_concrete_functions_for_serialization(self):
return self._concrete_functions
def recreate_function(saved_function, concrete_functions):
"""Creates a `Function` from a `SavedFunction`.
Args:
saved_function: `SavedFunction` proto.
concrete_functions: map from function name to `ConcreteFunction`.
Returns:
A `Function`.
"""
# TODO(andresp): Construct a `Function` with the cache populated
# instead of creating a new `Function` backed by a Python layer to
# glue things together. Current approach is nesting functions deeper for each
# serialization cycle.
  coder = nested_structure_coder.StructureCoder()
function_spec = _deserialize_function_spec(saved_function.function_spec,
coder)
def restored_function_body(*args, **kwargs):
"""Calls a restored function."""
# This is the format of function.graph.structured_input_signature. At this
# point, the args and kwargs have already been canonicalized.
inputs = (args, kwargs)
    # First try to find a concrete function that can be called without input
# conversions. This allows one to pick a more specific trace in case there
# was also a more expensive one that supported tensors.
for allow_conversion in [False, True]:
for function_name in saved_function.concrete_functions:
function = concrete_functions[function_name]
if _concrete_function_callable_with(function, inputs, allow_conversio
|
adityahase/frappe
|
frappe/core/doctype/installed_application/installed_application.py
|
Python
|
mit
| 278
| 0.007194
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class InstalledApplication(Document):
pass
|
orione7/Italorione
|
channels/guardarefilm.py
|
Python
|
gpl-3.0
| 11,213
| 0.002409
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand.- XBMC Plugin
# Channel for piratestreaming
# http://blog.tvalacarta.info/plugin-xbmc/streamondemand.
# ------------------------------------------------------------
import re
import sys
import urlparse
from core import config
from core import logger
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "guardarefilm"
__category__ = "F"
__type__ = "generic"
__title__ = "guardarefilm (IT)"
__language__ = "IT"
host = "http://www.guardarefilm.tv"
headers = [
['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:38.0) Gecko/20100101 Firefox/38.0'],
['Accept-Encoding', 'gzip, deflate'],
['Referer', host]
]
DEBUG = config.get_setting("debug")
def isGeneric():
return True
def mainlist(item):
logger.info("streamondemand.guardarefilm mainlist")
itemlist = [Item(channel=__channel__,
title="[COLOR azure]Novita' Al Cinema[/COLOR]",
action="peliculas",
url="%s/streaming-al-cinema/" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=__channel__,
title="[COLOR azure]Popolari[/COLOR]",
action="pelis_top100",
url="%s/top100.html" % host,
thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png"),
Item(channel=__channel__,
title="[COLOR azure]Categorie[/COLOR]",
action="categorias",
url=host,
thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png"),
Item(channel=__channel__,
title="[COLOR azure]Animazione[/COLOR]",
action="peliculas",
url="%s/streaming-cartoni-animati/" % host,
thumbnail="http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png"),
Item(channel=__channel__,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),
Item(channel=__channel__,
title="[COLOR azure]Serie TV[/COLOR]",
action="peliculas",
extra="serie",
url="%s/serie-tv-streaming/" % host,
                     thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/New%20TV%20Shows.png"),
Item(channel=__channel__,
|
title="[COLOR yellow]Cerca Serie TV...[/COLOR]",
action="search",
extra="serie",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
def categorias(item):
logger.info("streamondemand.guardarefilm categorias")
itemlist = []
data = scrapertools.cache_page(item.url, headers=headers)
# Narrow search by selecting only the combo
bloque = scrapertools.get_match(data, '<ul class="reset dropmenu">(.*?)</ul>')
# The categories are the options for the combo
patron = '<li><a href="([^"]+)">(.*?)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(bloque)
for scrapedurl, scrapedtitle in matches:
scrapedurl = urlparse.urljoin(item.url, scrapedurl)
scrapedthumbnail = ""
scrapedplot = ""
if (DEBUG): logger.info(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot))
return itemlist
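# --- Editor's addition (illustrative): the 'patron' above captures (url, title)
# pairs from markup shaped like the sample below; the category name and href
# are made-up examples, not taken from the real site ---
#
#     <ul class="reset dropmenu">
#       <li><a href="/film-azione/">Azione</a></li>
#     </ul>
#
# which would yield scrapedurl='/film-azione/' and scrapedtitle='Azione'.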
def search(item, texto):
logger.info("[guardarefilm.py] " + item.url + " search " + texto)
item.url = host + "/?do=search&subaction=search&story=" + texto
try:
if item.extra == "serie":
return peliculas(item)
else:
return peliculas(item)
    # Catch the exception so that a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def peliculas(item):
logger.info("streamondemand.guardarefilm peliculas")
itemlist = []
    # Download the page
    data = scrapertools.cache_page(item.url, headers=headers)
    # Extract the entries (folders)
patron = '<div class="poster"><a href="([^"]+)".*?><img src="([^"]+)".*?><span.*?</div>\s*'
patron += '<div.*?><a.*?>(.*?)</a></div>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
html = scrapertools.cache_page(scrapedurl, headers=headers)
start = html.find("<div class=\"textwrap\" itemprop=\"description\">")
end = html.find("</div>", start)
scrapedplot = html[start:end]
scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
if (DEBUG): logger.info(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(
Item(channel=__channel__,
action="episodios" if item.extra == "serie" else "findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=urlparse.urljoin(host, scrapedthumbnail),
plot=scrapedplot,
folder=True,
fanart=host + scrapedthumbnail))
    # Extract the paginator
patronvideos = '<div class="pages".*?<span>.*?<a href="([^"]+)">'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=__channel__,
action="HomePage",
title="[COLOR yellow]Torna Home[/COLOR]",
folder=True)),
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title="[COLOR orange]Successivo >>[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def HomePage(item):
import xbmc
xbmc.executebuiltin("ReplaceWindow(10024,plugin://plugin.video.streamondemand-pureita-master)")
def pelis_top100(item):
logger.info("streamondemand.guardarefilm peliculas")
itemlist = []
    # Download the page
    data = scrapertools.cache_page(item.url, headers=headers)
    # Extract the entries (folders)
patron = r'<span class="top100_title"><a href="([^"]+)">(.*?\(\d+\))</a>'
matches = re.compile(patron).findall(data)
for scrapedurl, scrapedtitle in matches:
html = scrapertools.cache_page(scrapedurl, headers=headers)
start = html.find("<div class=\"textwrap\" itemprop=\"description\">")
end = html.find("</div>", start)
scrapedplot = html[start:end]
scrapedplot = re.sub(r'<[^>]*>', '', scrapedplot)
scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)
scrapedtitle = scra
|
b12io/orchestra
|
orchestra/bots/sanitybot.py
|
Python
|
apache-2.0
| 3,750
| 0
|
from django.db.models import Max
from django.db.models import Q
from django.utils import timezone
from pydoc import locate
from orchestra.core.errors import SanityBotError
from orchestra.models import Project
from orchestra.models import SanityCheck
from orchestra.models import WorkflowVersion
from orchestra.utils.notifications import message_experts_slack_group
def _handle(project, sanity_check, handler):
handler_type = handler.get('type')
handler_message = handler.get('message')
handler_steps = handler.get('steps')
is_invalid = (handler_type != 'slack_project_channel') or (
not handler_message or not handler_steps)
if is_invalid:
raise SanityBotError('Invalid handler: {}'.format(handler))
tasks = (
task for task in project.tasks.all()
if task.step.slug in handler_steps)
usernames = {
assignment.worker.formatted_slack_username()
for task in tasks
for assignment in task.assignments.all()
if assignment and assignment.worker
}
message = '{}: {}'.format(' '.join(usernames), handler_message)
message_experts_slack_group(
project.slack_group_id, message)
def _filter_checks(project, checks, check_configurations):
latest_check_creation = {
check['check_slug']: check['max_created_at']
for check in (SanityCheck.objects
.filter(project=project)
.values('check_slug')
.annotate(max_created_at=Max('created_at')))}
for check in checks:
max_created_at = latest_check_creation.get(check.check_slug)
seconds = (
            check_configurations.get(check.check_slug, {})
.get('repetition_seconds'))
now = timezone.now()
seconds_none_or_rep_sec_lt = (max_created_at is None) or (
(seconds is not None) and (
(now - max_created_at).total_seconds() > seconds))
if seconds_none_or_rep_sec_lt:
yield check
def _handle_sanity_checks(project, sanity_checks, check_configurations):
sanity_checks = _filter_checks(
|
project, sanity_checks, check_configurations)
for sanity_check in sanity_checks:
config = check_configurations.get(sanity_check.check_slug)
if config is None:
raise SanityBotError(
'No configuration for {}'.format(sanity_check.check_slug))
handlers = config.get('handlers')
if handlers is None:
raise SanityBotError(
'No handlers for {}'.format(sanity_check.check_slug))
for handler in handlers:
_handle(project, sanity_check, handler)
sanity_check.handled_at = timezone.now()
sanity_check.project = project
sanity_check.save()
def create_and_handle_sanity_checks():
workflow_versions = WorkflowVersion.objects.all()
active = Q(status=Project.Status.ACTIVE)
paused = Q(status=Project.Status.PAUSED)
incomplete_projects = (Project.objects
.filter(workflow_version__in=workflow_versions)
.filter(active | paused))
for project in incomplete_projects:
sanity_checks = project.workflow_version.sanity_checks
sanity_check_path = (sanity_checks
.get('sanity_check_function', {})
.get('path'))
check_configurations = sanity_checks.get('check_configurations')
if sanity_check_path and check_configurations:
sanity_check_function = locate(sanity_check_path)
sanity_checks = sanity_check_function(project)
_handle_sanity_checks(
project, sanity_checks, check_configurations)
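
# --- Editor's addition: the configuration shape this module expects, inferred
# from the lookups above; slugs, messages and timings are illustrative only ---
#
# workflow_version.sanity_checks = {
#     'sanity_check_function': {'path': 'myapp.checks.run_sanity_checks'},
#     'check_configurations': {
#         'stalled_review': {
#             'repetition_seconds': 86400,
#             'handlers': [{
#                 'type': 'slack_project_channel',
#                 'message': 'This review looks stalled.',
#                 'steps': ['review'],
#             }],
#         },
#     },
# }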
|
Vaan5/piecewisecrf
|
piecewisecrf/slim/variables_test.py
|
Python
|
mit
| 16,163
| 0.011075
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from piecewisecrf.slim import scopes
from piecewisecrf.slim import variables
class VariablesTest(tf.test.TestCase):
def testCreateVariable(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [5])
self.assertEquals(a.op.name, 'A/a')
self.assertListEqual(a.get_shape().as_list(), [5])
def testGetVariables(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [5])
with tf.variable_scope('B'):
b = variables.variable('a', [5])
self.assertEquals([a, b], variables.get_variables())
self.assertEquals([a], variables.get_variables('A'))
self.assertEquals([b], variables.get_variables('B'))
def testGetVariablesSuffix(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [5])
with tf.variable_scope('A'):
        b = variables.variable('b', [5])
self.assertEquals([a], variables.get_variables(suffix='a'))
self.assertEquals([b], variables.get_variables(suffix='b'))
def testGetVariableWithSingleVar(self):
with self.test_session():
with tf.variable_scope('parent'):
a = variables.variable('child', [5])
self.assertEquals(a, variables.get_unique_variable('parent/child'))
def testGetVariableWithDistractors(self):
with self.test_session():
|
with tf.variable_scope('parent'):
a = variables.variable('child', [5])
with tf.variable_scope('child'):
variables.variable('grandchild1', [7])
variables.variable('grandchild2', [9])
self.assertEquals(a, variables.get_unique_variable('parent/child'))
def testGetVariableThrowsExceptionWithNoMatch(self):
var_name = 'cant_find_me'
with self.test_session():
with self.assertRaises(ValueError):
variables.get_unique_variable(var_name)
def testGetThrowsExceptionWithChildrenButNoMatch(self):
var_name = 'parent/child'
with self.test_session():
with tf.variable_scope(var_name):
variables.variable('grandchild1', [7])
variables.variable('grandchild2', [9])
with self.assertRaises(ValueError):
variables.get_unique_variable(var_name)
def testGetVariablesToRestore(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [5])
with tf.variable_scope('B'):
b = variables.variable('a', [5])
self.assertEquals([a, b], variables.get_variables_to_restore())
def testNoneGetVariablesToRestore(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [5], restore=False)
with tf.variable_scope('B'):
b = variables.variable('a', [5], restore=False)
self.assertEquals([], variables.get_variables_to_restore())
self.assertEquals([a, b], variables.get_variables())
def testGetMixedVariablesToRestore(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [5])
b = variables.variable('b', [5], restore=False)
with tf.variable_scope('B'):
c = variables.variable('c', [5])
d = variables.variable('d', [5], restore=False)
self.assertEquals([a, b, c, d], variables.get_variables())
self.assertEquals([a, c], variables.get_variables_to_restore())
def testReuseVariable(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [])
with tf.variable_scope('A', reuse=True):
b = variables.variable('a', [])
self.assertEquals(a, b)
self.assertListEqual([a], variables.get_variables())
def testVariableWithDevice(self):
with self.test_session():
with tf.variable_scope('A'):
a = variables.variable('a', [], device='cpu:0')
b = variables.variable('b', [], device='cpu:1')
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(b.device, 'cpu:1')
def testVariableWithDeviceFromScope(self):
with self.test_session():
with tf.device('/cpu:0'):
a = variables.variable('a', [])
b = variables.variable('b', [], device='cpu:1')
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(b.device, 'cpu:1')
def testVariableWithDeviceFunction(self):
class DevFn(object):
def __init__(self):
self.counter = -1
def __call__(self, op):
self.counter += 1
return 'cpu:%d' % self.counter
with self.test_session():
with scopes.arg_scope([variables.variable], device=DevFn()):
a = variables.variable('a', [])
b = variables.variable('b', [])
c = variables.variable('c', [], device='cpu:12')
d = variables.variable('d', [])
with tf.device('cpu:99'):
e_init = tf.constant(12)
e = variables.variable('e', initializer=e_init)
self.assertDeviceEqual(a.device, 'cpu:0')
self.assertDeviceEqual(a.initial_value.device, 'cpu:0')
self.assertDeviceEqual(b.device, 'cpu:1')
self.assertDeviceEqual(b.initial_value.device, 'cpu:1')
self.assertDeviceEqual(c.device, 'cpu:12')
self.assertDeviceEqual(c.initial_value.device, 'cpu:12')
self.assertDeviceEqual(d.device, 'cpu:2')
self.assertDeviceEqual(d.initial_value.device, 'cpu:2')
self.assertDeviceEqual(e.device, 'cpu:3')
self.assertDeviceEqual(e.initial_value.device, 'cpu:99')
def testVariableWithReplicaDeviceSetter(self):
with self.test_session():
with tf.device(tf.train.replica_device_setter(ps_tasks=2)):
a = variables.variable('a', [])
b = variables.variable('b', [])
c = variables.variable('c', [], device='cpu:12')
d = variables.variable('d', [])
with tf.device('cpu:99'):
e_init = tf.constant(12)
e = variables.variable('e', initializer=e_init)
# The values below highlight how the replica_device_setter puts initial
# values on the worker job, and how it merges explicit devices.
self.assertDeviceEqual(a.device, '/job:ps/task:0/cpu:0')
self.assertDeviceEqual(a.initial_value.device, '/job:worker/cpu:0')
self.assertDeviceEqual(b.device, '/job:ps/task:1/cpu:0')
self.assertDeviceEqual(b.initial_value.device, '/job:worker/cpu:0')
self.assertDeviceEqual(c.device, '/job:ps/task:0/cpu:12')
self.assertDeviceEqual(c.initial_value.device, '/job:worker/cpu:12')
self.assertDeviceEqual(d.device, '/job:ps/task:1/cpu:0')
self.assertDeviceEqual(d.initial_value.device, '/job:worker/cpu:0')
self.assertDeviceEqual(e.device, '/job:ps/task:0/cpu:0')
self.assertDeviceEqual(e.initial_value.device, '/job:worker/cpu:99')
def testVariableWithVariableDeviceChooser(self):
with tf.Graph().as_default():
device_fn = variables.VariableDeviceChooser(num_parameter_servers=2)
with scopes.arg_scope([variables.variable], device=device_fn):
a = variables.variable('a', [])
b = variables.variable('b', [])
c = variables.variable('c', [], device='cpu:12')
d = variables.variable('d', [])
with tf.device('cpu:99'):
e_init = tf.constant(12)
e = variables.variable('e', initializer=e_init)
# The values below h
|
jrichte43/ProjectEuler
|
Problem-0081/solutions.py
|
Python
|
gpl-3.0
| 949
| 0.007376
|
__problem_title__ = "Path sum: two ways"
__problem_url___ = "https://projecteuler.net/problem=81"
__problem_description__ = "In the 5 by 5 matrix below, the minimal path sum from the top left to " \
                          "the bottom right, by , is indicated in bold red and is equal to 2427. " \
                          "Find the minimal path sum, in (right click and \"Save Link/Target " \
                          "As...\"), a 31K text file containing an 80 by 80 matrix, from the top " \
                          "left to the bottom right by only moving right and down."
import timeit
class Solution():
@staticmethod
def solution1():
pass
@staticmethod
def time_solutions():
setup = 'from __main__ import Solution'
print('Solution 1:', timeit.timeit('Solution.solution1()', setup=setup, number=1))
if __name__ == '__main__':
s = Solution()
print(s.solution1())
s.time_solutions()
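
# --- Editor's addition: a minimal dynamic-programming sketch, run against the
# 5x5 example matrix from the problem statement (not the 80x80 puzzle input) ---
def _min_path_sum(matrix):
    # best[r][c] holds the minimal path sum from the top-left cell to (r, c)
    best = [row[:] for row in matrix]
    for r in range(len(matrix)):
        for c in range(len(matrix[0])):
            if r == 0 and c == 0:
                continue
            candidates = []
            if r > 0:
                candidates.append(best[r - 1][c])
            if c > 0:
                candidates.append(best[r][c - 1])
            best[r][c] += min(candidates)
    return best[-1][-1]

_example = [
    [131, 673, 234, 103, 18],
    [201, 96, 342, 965, 150],
    [630, 803, 746, 422, 111],
    [537, 699, 497, 121, 956],
    [805, 732, 524, 37, 331],
]
assert _min_path_sum(_example) == 2427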
|
y-sira/atcoder
|
ddcc2017-qual/b.py
|
Python
|
mit
| 79
| 0
|
a, b, c, d = map(int, input().split())
print(a * 1728 + b * 144 + c * 12 + d)
|
kaarl/pyload
|
module/plugins/crypter/BitshareComFolder.py
|
Python
|
gpl-3.0
| 522
| 0.011494
|
# -*- coding: utf-8 -*-
from module.plugins.internal.DeadCrypter import DeadCrypter
class BitshareComFolder(DeadCrypter):
__name__ = "BitshareComFolder"
__type__ = "crypter"
__version__ = "0.10"
__status__ = "testing"
__pattern__ = r'http://(?:www\.)?bitshare\.com/\?d=\w+'
__config__ = [("activated", "bool", "Activated", True)]
__description__ = """Bitshare.com folder decrypter plugin"""
__license__ = "GPLv3"
    __authors__ = [("stickell", "l.stickell@yahoo.it")]
|
goshow-jp/Kraken
|
Python/kraken/ui/HAppkit_Editors/editor_widgets/nested_editor.py
|
Python
|
bsd-3-clause
| 3,053
| 0.005568
|
import json
from PySide import QtCore, QtGui
from ..fe import FE
from ..widget_factory import EditorFactory
from ..base_editor import BaseValueEditor
from ..core.value_controller import MemberController
class NestedEditor(BaseValueEditor):
def __init__(self, valueController, parent=None):
super(NestedEditor, self).__init__(valueController, parent=parent)
self._value = self._invokeGetter()
self._labels = {}
self._editors = {}
self._gridRow = 0
self._grid = QtGui.QGridLayout()
self._grid.setColumnStretch(1, 1)
if self._valueController.hasOption('displayGroupbox'):
groupBox = QtGui.QGroupBox(self._valueController.getDataType())
groupBox.setLayout(self._grid)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(groupBox)
self.setLayout(vbox)
else:
self._grid.setContentsMargins(0, 0, 0, 0)
self.setLayout(self._grid)
    def addValueEditor(self, name, widget):
        # widget.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
label = QtGui.QLabel(name, self)
# label.setMaximumWidth(200)
# label.setContentsMargins(0, 5, 0, 0)
# label.setMinimumWidth(60)
# label.setSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
# label.setAlignment(QtCore.Qt.AlignRight)
# label.adjustSize()
rowSpan = widget.getRowSpan()
columnSpan = widget.getColumnSpan()
# if columnSpan==1:
self._grid.addWidget(label, self._gridRow, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
self._grid.addWidget(widget, self._gridRow, 1)#, QtCore.Qt.AlignLeft)
self._gridRow += 1
# else:
# self._grid.addWidget(label, self._gridRow, 0, QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
# self._grid.addWidget(widget, self._gridRow+1, 0, rowSpan, columnSpan)
# self._gridRow += 2
self._labels[name] = label
self._editors[name] = widget
def addMemberEditor(self, memberName, memberType):
memberController = MemberController(memberName, memberType, owner=self._value, editable=self.isEditable())
# memberController = self._valueController.getMemberController(memberName)
widget = EditorFactory.constructEditor(memberController, parent=self)
if widget is None:
return
self.addValueEditor(memberName, widget)
def getEditorValue(self):
return self._value
def setEditorValue(self, value):
        raise Exception("This method must be implemented by the derived widget:" + self.__class__.__name__)
def clear(self):
"""
When the widget is being removed from the inspector,
this method must be called to unregister the event handlers
"""
for label, widget in self._labels.iteritems():
widget.deleteLater()
for label, widget in self._editors.iteritems():
widget.deleteLater()
|
Nik0l/UTemPro
|
ML/Clustering.py
|
Python
|
mit
| 10,761
| 0.008642
|
__author__ = 'nb254'
import numpy as np
import pandas as pd
from sklearn import cluster
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import kneighbors_graph
import ClusteringPrediction as cp
import ClusteringSaveResults as csr
import DataPreprocessing as dp
import FeatureSelector as ftrs
import Questions as question
from Visualization import ClusteringPlot as plot
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.preprocessing import normalize
from sklearn.decomposition import PCA
def clusterData(data, clust, results, to_plot):
plot_sample_size = 6000
if clust['clustering_type'] == 'kmeans':
#TODO kmeans works well even on 2.000.000 questions
kmeans = KMeans(init='k-means++', n_clusters=clust['n_clusters'], n_init=10)
kmeans.fit(data)
clust['centers'] = kmeans.cluster_centers_
results['cluster_labels'] = kmeans.labels_
if to_plot:
plot.PlotData(data, kmeans, plot_sample_size, clust['exp'])
if clust['clustering_type'] == 'spectral':
spectral = cluster.SpectralClustering(n_clusters=clust['n_clusters'],
eigen_solver='arpack',
affinity="nearest_neighbors")
spectral.fit(data)
plot.PlotData(data, spectral, plot_sample_size, clust['exp'])
if clust['clustering_type'] == 'birch':
birch = cluster.Birch(n_clusters=results['n_clusters'])
birch.fit(data)
results['cluster_labels'] = birch.labels_
print 'number of entries clustered', len(results['cluster_labels'])
plot.PlotData(data, birch, plot_sample_size, clust['exp'])
if clust['clustering_type'] == 'dbscan':
dbscan = cluster.DBSCAN(eps=.2)
dbscan.fit(data)
results['cluster_labels'] = dbscan.labels_
plot.PlotData(data, dbscan, plot_sample_size, clust['exp'])
if clust['clustering_type'] == 'affinity_propagation':
affinity_propagation = cluster.AffinityPropagation(damping=.9, preference=-200)
affinity_propagation.fit(data)
        plot.PlotData(data, affinity_propagation, plot_sample_size, clust['exp'])
if clust['clustering_type'] == 'ward':
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(data, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
ward = cluster.AgglomerativeClustering(n_clusters=clust['n_clusters'], linkage='ward',
connectivity=connectivity)
ward.fit(data)
results['cluster_labels'] = ward.labels_
plot.PlotData(data, ward, plot_sample_size, clust['exp'])
if clust['clustering_type'] == 'average_linkage':
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(data, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=clust['n_clusters'],
connectivity=connectivity)
average_linkage.fit(data)
results['cluster_labels'] = average_linkage.labels_
plot.PlotData(data, average_linkage, plot_sample_size, clust['exp'])
df = csr.clustDfFromRes(results)
stats = csr.clusterResults(df, clust)
return df, stats
'''
def firstExperiment(sampled_data, features_all, clust, results, filename_clusters, filename_stats):
clust['exp']= 1
for feature in features_all:
df, stats = runClustering(sampled_data[['SecondsToAcceptedAnswer'] + [feature]], clust, results)
csr.to_csv(stats, filename_stats)
df.to_csv(filename_clusters)
clust['exp'] = clust['exp'] + 1
def secondExperiment(sampled_data, features_variations, clust, results, filename_clusters):
clust['exp'] = 50
for features in features_variations:
df, stats = runClustering(sampled_data[features], clust, results)
csr.to_csv(stats, filename_stats)
df.to_csv(filename_clusters)
clust['exp'] = clust['exp'] + 1
def thirdExperiment(sampled_data, clust, results, filename_clusters, filename_stats):
features = ftrs.setAllFeatures()
df, stats = runClustering(sampled_data[features], clust, results)
csr.to_csv(stats, filename_stats)
df.to_csv(filename_clusters)
'''
def initClust(exp, n_clusters, sample_size, features_to_use, clustering_type):
clust = dict(
exp=exp,
n_samples=sample_size,
features=features_to_use,
n_features=len(features_to_use)+1,
n_clusters=n_clusters,
clustering_type=clustering_type,
centers=np.empty([n_clusters, 2]), #centroids
)
return clust
def printQues():
QuestionIds = [4, 6, 7]
ques = question.getQuestionsbyId(QuestionIds)
print '%'*100
for que in ques:
print que
def getQuestions(labels, df):
print df.head()
df1 = df[df['Cluster'].isin(labels)]
#df1 = df1.drop('Unnamed: 0', 1)
return df1
def selectLabels(df, criteria=4):
#if df['mean_time'] < 5:
#labels = [0,1,2]
labels = [0, 1, 2, 3, 4]
return labels
def mergeTitle(df1, filename2):
#df1 = pd.read_csv(filename1)
df4 = pd.read_csv(filename2)
file_name = 'merged_new.csv'
result = pd.merge(df1, df4, on='PostId')
result.to_csv(file_name)
def matchClusters(dir_c, df, dfs, dfc, filename_out):
labels = selectLabels(dfs)
# for all labels of interest
dfq = getQuestions(labels, dfc)
qlist = dfq['PostId'].tolist()
print qlist
df1 = df[df['PostId'].isin(qlist)]
df1.to_csv(filename_out)
#for each labels
for label in labels:
dfq = getQuestions([label], dfc)
qlist = dfq['PostId'].tolist()
dfl = df[df['PostId'].isin(qlist)]
dfl.to_csv(dir_c + str(label) + '_' + filename_out)
#dfq.to_csv(filename_out)
def clustering(clust, filenames, saved=False):
#mergeTitle(df, filename2)
if saved:
stats = pd.read_csv(filenames['stats'])
clusters = pd.read_csv(filenames['clusters'])
else:
data, results = dp.getDataForClustering(filenames, clust)
#TODO divide data into training and testing datasets
clust['n_samples'] = len(data)
print 'total instances:', clust['n_samples']
testing_num = int(clust['n_samples'] * 0.2)
#testing_num = 1924500
results['quest_id'] = results['quest_id'][testing_num:clust['n_samples']]
results['time_row'] = results['time_row'][testing_num:clust['n_samples']]
print 'testing instances: ', str(testing_num) # 385981
print 'Started clustering...'
#clusters, stats = clusterData(data, clust, results, False)
clusters, stats = clusterData(data[testing_num:clust['n_samples']], clust, results, False)
print 'Saving the clustering results...'
csr.to_csv1(stats, filenames['stats'])
clusters.to_csv(filenames['clusters'])
return stats, clusters
def clusteringA(clustMeta, dir_c, filenames):
#os.mkdir(dir_c, 0777)
stats, dfn = clustering(clustMeta, filenames)
# match clusters to data
print 'Opening a file with the data on the questions'
df = pd.read_csv(filenames['input'])
print 'Matching the data on the questions with the clusters'
matchClusters(dir_c, df, stats, dfn, filenames['out'])
#TODO prediction using clusters n
dfpca = pd.read_csv(dir_c + 'pca.csv', header=None)
#print dfpca.shape
#print dfpca[0:12]
test = dfpca[0:50]
print len(test)
n_neighbors = 3
dfstats = pd.read_csv(filenames['stats'])
#dfstats = dfstats[dfstats['questions'].str.contains("questions") == False]
#print dfstats
df = pd.read_csv(filenames['clusters'])
neigh = NearestNeighbors(n_neighbors=n_neighbors)
neigh.fit(dfstats[['x','y']])
#print test
closest = neigh.kneighbors(test) #TODO: dimension mismatching
data = cp.calcAccuracy(dfstats, closest, df, n_neighbo
|
inter-rpm/dns-interface
|
main.py
|
Python
|
gpl-3.0
| 650
| 0.009231
|
#!/usr/bin/env python
# coding=utf-8
import sys
from flask import Flask, request, jsonify
import simplejson as json
import handlers
app = Flask(__name__)
api_list = {
'api/dns': u'get dns',
'api/update': u'update domain information',
}
@app.route("/", methods = ['GET', 'POST'])
def index():
return jsonify(api_list)
@app.route("/api/dns", methods=['GET', 'POST'])
def dns():
body = request.json
ip = request.remote_addr
data = handlers.DNSHandler(ip, body)
return jsonify(data)
@app.route("/api/update", methods=['GET', 'POST'])
def update():
pass
if __name__ == '__main__':
app.run(debug=True)
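
# --- Editor's addition: an illustrative client call; the JSON payload keys are
# assumptions, since the real schema is defined by handlers.DNSHandler ---
#
#     import requests
#     resp = requests.post('http://127.0.0.1:5000/api/dns',
#                          json={'domain': 'example.com'})
#     print(resp.json())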
|
leilihh/nova
|
nova/scheduler/rpcapi.py
|
Python
|
apache-2.0
| 5,246
| 0.000953
|
# Copyright 2013, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the scheduler manager RPC API.
"""
from oslo.config import cfg
from oslo import messaging
from nova.objects import base as objects_base
from nova.openstack.common import jsonutils
from nova import rpc
rpcapi_opts = [
cfg.StrOpt('scheduler_topic',
default='scheduler',
help='The topic scheduler nodes listen on'),
]
CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)
rpcapi_cap_opt = cfg.StrOpt('scheduler',
help='Set a version cap for messages sent to scheduler services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class SchedulerAPI(object):
'''Client side of the scheduler rpc API.
API version history:
1.0 - Initial version.
1.1 - Changes to prep_resize():
- remove instance_uuid, add instance
- remove instance_type_id, add instance_type
- remove topic, it was unused
1.2 - Remove topic from run_instance, it was unused
1.3 - Remove instance_id, add instance to live_migration
1.4 - Remove update_db from prep_resize
1.5 - Add reservations argument to prep_resize()
1.6 - Remove reservations argument to run_instance()
1.7 - Add create_volume() method, remove topic from live_migration()
2.0 - Remove 1.x backwards compat
2.1 - Add image_id to create_volume()
2.2 - Remove reservations argument to create_volume()
2.3 - Remove create_volume()
2.4 - Change update_service_capabilities()
- accepts a list of capabilities
2.5 - Add get_backdoor_port()
2.6 - Add select_hosts()
... Grizzly supports message version 2.6. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.6.
2.7 - Add select_destinations()
2.8 - Deprecate prep_resize() -- JUST KIDDING. It is still used
by the compute manager for retries.
2.9 - Added the legacy_bdm_in_spec parameter to run_instance()
... Havana supports message version 2.9. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 2.9.
... - Deprecated live_migration() call, moved to conductor
... - Deprecated select_hosts()
3.0 - Removed backwards compat
'''
VERSION_ALIASES = {
'grizzly': '2.6',
'havana': '2.9',
'icehouse': '3.0',
}
def __init__(self):
super(SchedulerAPI, self).__init__()
target = messaging.Target(topic=CONF.scheduler_topic, version='3.0')
version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.scheduler,
CONF.upgrade_levels.scheduler)
serializer = objects_base.NovaObjectSerializer()
self.client = rpc.get_client(target, version_cap=version_cap,
serializer=serializer)
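
    # --- Editor's note (illustrative, not part of the original module): with
    # CONF.upgrade_levels.scheduler set to a release alias, the cap above
    # resolves through VERSION_ALIASES, e.g.
    #     SchedulerAPI.VERSION_ALIASES.get('havana', 'havana')  # -> '2.9'
    # while an unrecognised value falls through unchanged and is used verbatim.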
def select_destinations(self, ctxt, request_spec, filter_properties):
cctxt = self.client.prepare()
return cctxt.call(ctxt, 'select_destinations',
request_spec=request_spec, filter_properties=filter_properties)
def run_instance(self, ctxt, request_spec, admin_password,
injected_files, requested_networks, is_first_time,
filter_properties, legacy_bdm_in_spec=True):
msg_kwargs = {'request_spec': request_spec,
'admin_password': admin_password,
'injected_files': injected_files,
'requested_networks': requested_networks,
'is_first_time': is_first_time,
'filter_properties': filter_properties,
'legacy_bdm_in_spec': legacy_bdm_in_spec}
|
cctxt = self.client.prepare()
|
cctxt.cast(ctxt, 'run_instance', **msg_kwargs)
def prep_resize(self, ctxt, instance, instance_type, image,
request_spec, filter_properties, reservations):
instance_p = jsonutils.to_primitive(instance)
instance_type_p = jsonutils.to_primitive(instance_type)
reservations_p = jsonutils.to_primitive(reservations)
image_p = jsonutils.to_primitive(image)
cctxt = self.client.prepare()
cctxt.cast(ctxt, 'prep_resize',
instance=instance_p, instance_type=instance_type_p,
image=image_p, request_spec=request_spec,
filter_properties=filter_properties,
reservations=reservations_p)
|
tysonholub/twilio-python
|
tests/integration/api/v2010/account/test_connect_app.py
|
Python
|
mit
| 7,437
| 0.003765
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class ConnectAppTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
|
.connect_apps(sid="CNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/ConnectApps/CNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_fetch_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"authorize_redirect_url": "http://example.com/redirect",
"company_name": "Twilio",
"deauthorize_callback_method": "GET",
"deauthorize_callback_url": "http://example.com/deauth",
"description": null,
"friendly_name": "Connect app for deletion",
"homepage_url": "http://example.com/home",
"permissions": [],
"sid": "CNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ConnectApps/CNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.connect_apps(sid="CNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.connect_apps(sid="CNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/ConnectApps/CNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_update_response(self):
self.holodeck.mock(Response(
200,
'''
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"authorize_redirect_url": "http://example.com/redirect",
"company_name": "Twilio",
"deauthorize_callback_method": "GET",
"deauthorize_callback_url": "http://example.com/deauth",
"description": null,
"friendly_name": "Connect app for deletion",
"homepage_url": "http://example.com/home",
"permissions": [],
"sid": "CNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ConnectApps/CNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.connect_apps(sid="CNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.connect_apps.list()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/ConnectApps.json',
))
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"connect_apps": [
{
"account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"authorize_redirect_url": "http://example.com/redirect",
"company_name": "Twilio",
"deauthorize_callback_method": "GET",
"deauthorize_callback_url": "http://example.com/deauth",
"description": null,
"friendly_name": "Connect app for deletion",
"homepage_url": "http://example.com/home",
"permissions": [],
"sid": "CNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ConnectApps/CNaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
],
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ConnectApps.json?Page=0&PageSize=50",
"next_page_uri": null,
"page": 0,
"page_size": 50,
"previous_page_uri": null,
"start": 0,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ConnectApps.json"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.connect_apps.list()
self.assertIsNotNone(actual)
def test_read_empty_response(self):
self.holodeck.mock(Response(
200,
'''
{
"connect_apps": [],
"end": 0,
"first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ConnectApps.json?Page=0&PageSize=50",
"next_page_uri": null,
"page": 0,
"page_size": 50,
"previous_page_uri": null,
"start": 0,
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ConnectApps.json"
}
'''
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.connect_apps.list()
self.assertIsNotNone(actual)
def test_delete_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.connect_apps(sid="CNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.holodeck.assert_has_request(Request(
'delete',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/ConnectApps/CNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_delete_response(self):
self.holodeck.mock(Response(
204,
None,
))
actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.connect_apps(sid="CNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").delete()
self.assertTrue(actual)
|
ULHPC/modules
|
easybuild/easybuild-easyblocks/easybuild/easyblocks/w/wps.py
|
Python
|
mit
| 14,921
| 0.003418
|
##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing WPS, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import fileinput
import os
import re
import shutil
import sys
import tempfile
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.netcdf import set_netcdf_env_vars #@UnresolvedImport
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import extract_file, patch_perl_script_autoflush, rmtree2
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd, run_cmd_qa
class EB_WPS(EasyBlock):
"""Support for building/installing WPS."""
def __init__(self, *args, **kwargs):
"""Add extra config options specific to WPS."""
super(EB_WPS, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.comp_fam = None
self.wrfdir = None
self.compile_script = None
@staticmethod
def extra_options():
testdata_urls = [
"http://www.mmm.ucar.edu/wrf/src/data/avn_data.tar.gz",
"http://www.mmm.ucar.edu/wrf/src/wps_files/geog.tar.gz", # 697MB download, 16GB unpacked!
]
extra_vars = {
'buildtype': [None, "Specify the type of build (smpar: OpenMP, dmpar: MPI).", MANDATORY],
'runtest': [True, "Build and run WPS tests", CUSTOM],
'testdata': [testdata_urls, "URL to test data required to run WPS test", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def configure_step(self):
"""Configure build:
- set required environment variables (for netCDF, JasPer)
- patch compile script and ungrib Makefile for non-default install paths of WRF and JasPer
- run configure script and figure how to select desired build option
- patch configure.wps file afterwards to fix 'serial compiler' setting
"""
# netCDF dependency check + setting env vars (NETCDF, NETCDFF)
set_netcdf_env_vars(self.log)
# WRF dependency check
wrf = get_software_root('WRF')
if wrf:
majver = get_software_version('WRF').split('.')[0]
self.wrfdir = os.path.join(wrf, "WRFV%s" % majver)
else:
raise EasyBuildError("WRF module not loaded?")
# patch compile script so that WRF is found
self.compile_script = "compile"
try:
for line in fileinput.input(self.compile_script, inplace=1, backup='.orig.wrf'):
line = re.sub(r"^(\s*set\s*WRF_DIR_PRE\s*=\s*)\${DEV_TOP}(.*)$", r"\1%s\2" %
|
self.wrfdir, line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s script: %s", self.compile_script, err)
# libpng dependency check
        libpng = get_software_root('libpng')
zlib = get_software_root('zlib')
if libpng:
paths = [libpng]
if zlib:
paths.insert(0, zlib)
libpnginc = ' '.join(['-I%s' % os.path.join(path, 'include') for path in paths])
libpnglib = ' '.join(['-L%s' % os.path.join(path, 'lib') for path in paths])
else:
raise EasyBuildError("libpng module not loaded?")
# JasPer dependency check + setting env vars
jasper = get_software_root('JasPer')
if jasper:
env.setvar('JASPERINC', os.path.join(jasper, "include"))
jasperlibdir = os.path.join(jasper, "lib")
env.setvar('JASPERLIB', jasperlibdir)
jasperlib = "-L%s" % jasperlibdir
else:
raise EasyBuildError("JasPer module not loaded?")
# patch ungrib Makefile so that JasPer is found
fn = os.path.join("ungrib", "src", "Makefile")
jasperlibs = "%s -ljasper %s -lpng" % (jasperlib, libpnglib)
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.JasPer'):
line = re.sub(r"^(\s*-L\.\s*-l\$\(LIBTARGET\))(\s*;.*)$", r"\1 %s\2" % jasperlibs, line)
line = re.sub(r"^(\s*\$\(COMPRESSION_LIBS\))(\s*;.*)$", r"\1 %s\2" % jasperlibs, line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
# patch arch/Config.pl script, so that run_cmd_qa receives all output to answer questions
patch_perl_script_autoflush(os.path.join("arch", "Config.pl"))
# configure
# determine build type option to look for
self.comp_fam = self.toolchain.comp_family()
build_type_option = None
if LooseVersion(self.version) >= LooseVersion("3.4"):
knownbuildtypes = {
'smpar': 'serial',
'dmpar': 'dmpar'
}
if self.comp_fam == toolchain.INTELCOMP: #@UndefinedVariable
build_type_option = " Linux x86_64, Intel compiler"
elif self.comp_fam == toolchain.GCC: #@UndefinedVariable
build_type_option = "Linux x86_64 g95 compiler"
else:
raise EasyBuildError("Don't know how to figure out build type to select.")
else:
knownbuildtypes = {
'smpar': 'serial',
'dmpar': 'DM parallel'
}
if self.comp_fam == toolchain.INTELCOMP: #@UndefinedVariable
build_type_option = "PC Linux x86_64, Intel compiler"
elif self.comp_fam == toolchain.GCC: #@UndefinedVariable
build_type_option = "PC Linux x86_64, gfortran compiler,"
knownbuildtypes['dmpar'] = knownbuildtypes['dmpar'].upper()
else:
raise EasyBuildError("Don't know how to figure out build type to select.")
# check and fetch selected build type
bt = self.cfg['buildtype']
if not bt in knownbuildtypes.keys():
raise EasyBuildError("Unknown build type: '%s'. Supported build types: %s", bt, knownbuildtypes.keys())
# fetch option number based on build type option and selected build type
build_type_question = "\s*(?P<nr>[0-9]+).\s*%s\s*\(?%s\)?\s*\n" % (build_type_option, knownbuildtypes[bt])
cmd = "./configure"
qa = {}
no_qa = [".*compiler is.*"]
std_qa = {
# named group in match will be used to construct answer
r"%s(.*\n)*Enter selection\s*\[[0-9]+-[0-9]+\]\s*:" % build_type_question: "%(nr)s",
}
run_cmd_qa(cmd, qa, no_qa=no_qa, std_qa=std_qa, log_all=True, sim
|
SimbaService/Simba
|
server/scripts/probe/swift.py
|
Python
|
apache-2.0
| 8,605
| 0.023707
|
#!/usr/bin/python
import probe_config as conf
import socket
import re
import os
import tempfile
import shutil
class Swift:
def __init__(self, myname, is_storage):
self.myname = myname
print "Myname = " + self.myname
self.allnodes = conf.swift_nodes
print "all nodes=" + str(self.allnodes)
self.all_ips = [socket.gethostbyname(x) for x in self.allnodes]
self.my_ip = socket.gethostbyname(self.myname)
self.base_dir = '/srv/node/%s1' % conf.data_disk
self.is_storage = is_storage
def _grep(self, needle, filename):
with open(filename, "r") as infile:
for line in infile:
if re.search(needle, line):
return True
return False
def _append_to_file(self, line, filename):
with open(filename, "a") as outfile:
outfile.write(line)
def _initialize_container(self):
print "Initializing container"
os.system('swift -A http://localhost:8080/auth/v1.0 -U simba:simba -K simba123 post simbastore')
def _replace_in_file(self, before, after, filename):
with open(filename, "r") as infile:
lines = infile.readlines()
fh, path = tempfile.mkstemp()
with open(path, 'w') as outfile:
for line in lines:
line = re.sub(before, after, line)
outfile.write(line)
os.close(fh)
os.rename(path, filename)
def _build_ring(self, ring_type, port):
b = "%s.builder" % ring_type
dev = "%s1" % conf.data_disk
os.system("swift-ring-builder %s create %d 3 1" % (b, conf.swift_num_partitions))
znum=1
for node in self.all_ips:
os.system("swift-ring-builder %s add z%d-%s:%d/%s 100" % (b, znum, node, port, dev))
znum += 1
os.system("swift-ring-builder %s" % b)
os.system("swift-ring-builder %s rebalance" % b)
def _build_rings(self):
print 'self.all_ips[0]==', self.all_ips[0]
print 'self.my_ip==', self.my_ip
if self.my_ip == self.all_ips[0]:
self._build_ring('account', 6002)
self._build_ring('container', 6001)
self._build_ring('object', 6000)
shutil.copy2('account.ring.gz', '/etc/swift')
shutil.copy2('container.ring.gz', '/etc/swift')
shutil.copy2('object.ring.gz', '/etc/swift')
os.system('chown -R swift:swift /etc/swift')
def _configure_limits(self):
s = """
* soft nofile 999999
* hard nofile 999999
"""
with open('/etc/security/limits.conf','a') as outfile:
outfile.write(s)
os.system('sysctl -p')
def _configure_sysctl(self):
s = """
# disable TIME_WAIT.. wait..
net.ipv4.tcp_tw_recycle=1
net.ipv4.tcp_tw_reuse=1
# disable syn cookies
net.ipv4.tcp_syncookies = 0
# double amount of allowed conntrack
net.ipv4.netfilter.ip_conntrack_max = 262144
net.core.rmem_max = 8388608
net.core.wmem_max = 8388608
net.core.rmem_default = 65536
net.core.wmem_default = 65536
net.ipv4.tcp_rmem = 4096 87380 8388608
net.ipv4.tcp_wmem = 4096 65536 8388608
net.ipv4.tcp_mem = 8388608 8388608 8388608
net.ipv4.ip_local_port_range = 15000 61000
net.ipv4.tcp_fin_timeout = 15
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_tw_reuse = 1
net.core.somaxconn = 32768
net.ipv4.tcp_max_syn_backlog = 10240
net.core.netdev_max_backlog = 10240
fs.file-max = 999999
"""
with open('/etc/sysctl.conf','w') as outfile:
outfile.write(s)
os.system('sysctl -p')
def _update_users(self):
if not self._grep('swift', '/etc/passwd'):
self._append_to_file('swift:x:109:120::/home/swift:/bin/false', '/etc/passwd')
if not self._grep('swift', '/etc/group'):
self._append_to_file('swift:x:120:', '/etc/group')
os.system('mkdir -p /home/swift')
os.system('chown swift:swift /home/swift')
def _configure_rsync(self):
s="""
uid = swift
gid = swift
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
address = %s
[account]
max connections = 2
path = /srv/node/
read only = false
lock file = /var/lock/account.lock
[container]
max connections = 2
path = /srv/node/
read only = false
lock file = /var/lock/container.lock
[object]
max connections = 2
path = /srv/node/
read only = false
lock file = /var/lock/object.lock
""" % self.my_ip
with open('/etc/rsyncd.conf', 'w') as outfile:
outfile.write(s)
self._replace_in_file('RSYNC_ENABLE=false', 'RSYNC_ENABLE=true', '/etc/default/rsync')
def _configure_account_server(self):
if not os.path.exists('/etc/swift'):
os.makedirs('/etc/swift')
s= """\
[DEFAULT]
bind_ip = %s
workers = 2
devices=/srv/node
[pipeline:main]
pipeline = account-server
[app:account-server]
use = egg:swift#account
[account-replicator]
concurrency = 4
[account-auditor]
[account-reaper]
concurrency = 4\
""" % self.my_ip
with open('/etc/swift/account-server.conf', 'w') as outfile:
outfile.write(s)
def _configure_container_server(self):
if not os.path.exists('/etc/swift'):
os.makedirs('/etc/swift')
s="""\
[DEFAULT]
bind_ip = %s
workers = 2
devices=/srv/node
[pipeline:main]
pipeline = container-server
[app:container-server]
use = egg:swift#container
[container-replicator]
concurrency = 4
[container-updater]
concurrency = 2
[container-auditor]
[container-sync]\
""" % self.my_ip
with open('/etc/swift/container-server.conf', 'w') as outfile:
outfile.write(s)
def _configure_object_server(self):
if not os.path.exists('/etc/swift'):
os.makedirs('/etc/swift')
s="""\
[DEFAULT]
bind_ip = %s
workers = 4
devices=/srv/node
[pipeline:main]
pipeline = object-server
[app:object-server]
use = egg:swift#object
network_chunk_size=65536
disk_chunk_size=65536
threads_per_disk=4
replication_concurrency=1
[object-replicator]
concurrency = 1
[object-updater]
concurrency = 1
[object-auditor]
files_per_second = 1
bytes_per_second = 65536
""" % self.my_ip
with open('/etc/swift/object-server.conf', 'w') as outfile:
outfile.write(s)
def _configure_hash(self):
s="""\
[swift-hash]
# random unique strings that can never change (DO NOT LOSE)
swift_hash_path_prefix = 256b3282f8acc0ee0dad2565d1ab670a
swift_hash_path_suffix = 13409460ac1879aff0b161c750fa7db1
"""
with open('/etc/swift/swift.conf', 'w') as outfile:
outfile.write(s)
def _configure_proxy_server(self):
s="""\
[DEFAULT]
bind_port = 8080
workers = 8
user = swift
[pipeline:main]
pipeline = healthcheck cache tempauth proxy-server
[app:proxy-server]
use = egg:swift#proxy
allow_account_management = true
account_autocreate = true
[filter:tempauth]
use = egg:swift#tempauth
user_system_root = testpass .admin https://%s:8080/v1/AUTH_system
user_simba_simba = simba123 .admin http://%s:8080/v1/AUTH_system
token_life = 604800
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:cache]
use = egg:swift#memcache
""" % (self.my_ip, self.my_ip)
all_proxy_nodes = [socket.gethostbyname(x) for x in conf.proxy_nodes]
m = "memcache_servers = %s:11211," % all_proxy_nodes[0]
for p in all_proxy_nodes[1:]:
m += "%s:11211," % p
m += '\n'
with open('/etc/swift/proxy-server.conf', 'w') as outfile:
outfile.write(s)
outfile.write(m)
def _configure_as_storage_node(self):
self._update_users()
os.system("./partition.sh %s %s" % (conf.data_disk, self.base_dir))
os.system("chown swift:swift %s" % self.base_dir)
self._configure_rsync()
self._configure_account_server()
self._configure_container_server()
self._configure_object_server()
self._configure_hash()
self._build_rings()
self._configure_sysctl()
self._configure_limits()
def _configure_as_proxy_node(self):
self._update_users()
# IF PROXY NODES = SWIFT NODES, LEAVE THIS COMMENTED OUT
#os.system("./partition.sh %s %s" % (conf.data_disk, self.base_dir))
#os.system("chgrp %s %s" % (conf.proj, self.base_dir))
#os.system("chmod g+w %s" % self.base_dir)
self._configure_proxy_server()
self._replace_in_file('^-l.*', '-l %s' % self.my_ip, '/etc/memcached.conf')
self._configure_hash()
self._build_rings()
self._configure_sysctl()
def _start_proxy_node(self):
os.system("service memcached stop")
os.system("service memcached start")
os.system('swift-init proxy start')
if self.myname == self.allnodes[-1]:
self._initialize_container()
def _start_storage_node(self):
os.system("service rsync restart")
os.system('swift-init all start')
def configure(self):
print 'Configure swift...'
if self.is_storage:
print '
|
realsobek/freeipa
|
ipalib/install/certstore.py
|
Python
|
gpl-3.0
| 15,409
| 0.00013
|
# Authors:
# Jan Cholasta <jcholast@redhat.com>
#
# Copyright (C) 2014 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
LDAP shared certificate store.
"""
from pyasn1.error import PyAsn1Error
from ipapython.dn import DN
from ipapython.certdb import get_ca_nickname
from ipalib import errors, x509
def _parse_cert(dercert):
try:
cert = x509.load_certificate(dercert, x509.DER)
subject = DN(cert.subject)
issuer = DN(cert.issuer)
serial_number = cert.serial_number
public_key_info = x509.get_der_public_key_info(dercert, x509.DER)
except (ValueError, PyAsn1Error) as e:
raise ValueError("failed to decode certificate: %s" % e)
subject = str(subject).replace('\\;', '\\3b')
issuer = str(issuer).replace('\\;', '\\3b')
issuer_serial = '%s;%s' % (issuer, serial_number)
return subject, issuer_serial, public_key_info
def init_ca_entry(entry, dercert, nickname, trusted, ext_key_usage):
"""
Initialize certificate store entry for a CA certificate.
"""
subject, issuer_serial, public_key = _parse_cert(dercert)
if ext_key_usage is not None:
try:
cert_eku = x509.get_ext_key_usage(dercert, x509.DER)
except ValueError as e:
raise ValueError("failed to decode certificate: %s" % e)
if cert_eku is not None:
cert_eku -= {x509.EKU_SERVER_AUTH, x509.EKU_CLIENT_AUTH,
x509.EKU_EMAIL_PROTECTION, x509.EKU_CODE_SIGNING,
x509.EKU_ANY, x509.EKU_PLACEHOLDER}
ext_key_usage = ext_key_usage | cert_eku
entry['objectClass'] = ['ipaCertificate', 'pkiCA', 'ipaKeyPolicy']
entry['cn'] = [nickname]
entry['ipaCertSubject'] = [subject]
entry['ipaCertIssuerSerial'] = [issuer_serial]
entry['ipaPublicKey'] = [public_key]
entry['cACertificate;binary'] = [dercert]
if trusted is not None:
entry['ipaKeyTrust'] = ['trusted' if trusted else 'distrusted']
if ext_key_usage is not None:
ext_key_usage = list(ext_key_usage)
if not ext_key_usage:
ext_key_usage.append(x509.EKU_PLACEHOLDER)
entry['ipaKeyExtUsage'] = ext_key_usage
def update_compat_ca(ldap, base_dn, dercert):
"""
Update the CA certificate in cn=CAcert,cn=ipa,cn=etc,SUFFIX.
"""
dn = DN(('cn', 'CAcert'), ('cn', 'ipa'), ('cn', 'etc'), base_dn)
try:
entry = ldap.get_entry(dn, attrs_list=['cACertificate;binary'])
entry.single_value['cACertificate;binary'] = dercert
ldap.update_entry(entry)
except errors.NotFound:
entry = ldap.make_entry(dn)
entry['objectClass'] = ['nsContainer', 'pkiCA']
entry.single_value['cn'] = 'CAcert'
entry.single_value['cACertificate;binary'] = dercert
ldap.add_entry(entry)
except errors.EmptyModlist:
pass
def clean_old_config(ldap, base_dn, dn, config_ipa, config_compat):
"""
Remove ipaCA and compatCA flags from their previous carriers.
"""
if not config_ipa and not config_compat:
return
try:
result, _truncated = ldap.find_entries(
base_dn=DN(('cn', 'certificates'), ('cn', 'ipa'), ('cn', 'etc'),
base_dn),
filter='(|(ipaConfigString=ipaCA)(ipaConfigString=compatCA))',
attrs_list=['ipaConfigString'])
except errors.NotFound:
return
for entry in result:
if entry.dn == dn:
continue
for config in list(entry['ipaConfigString']):
if config.lower() == 'ipaca' and config_ipa:
entry['ipaConfigString'].remove(config)
elif config.lower() == 'compatca' and config_compat:
entry['ipaConfigString'].remove(config)
try:
ldap.update_entry(entry)
except errors.EmptyModlist:
pass
def add_ca_cert(ldap, base_dn, dercert, nickname, trusted=None,
ext_key_usage=None, config_ipa=False, config_compat=False):
"""
Add new entry for a CA certificate to the certificate store.
"""
container_dn = DN(('cn', 'certificates'), ('cn', 'ipa'), ('cn', 'etc'),
base_dn)
dn = DN(('cn', nickname), container_dn)
entry = ldap.make_entry(dn)
init_ca_entry(entry, dercert, nickname, trusted, ext_key_usage)
if config_ipa:
entry.setdefault('ipaConfigString', []).append('ipaCA')
if config_compat:
entry.setdefault('ipaConfigString', []).append('compatCA')
if config_compat:
update_compat_ca(ldap, base_dn, dercert)
ldap.add_entry(entry)
clean_old_config(ldap, base_dn, dn, config_ipa, config_compat)
def update_ca_cert(ldap, base_dn, dercert, trusted=None, ext_key_usage=None,
config_ipa=False, config_compat=False):
"""
Update existing entry for a CA certificate in the certificate store.
"""
subject, issuer_serial, public_key = _parse_cert(dercert)
filter = ldap.make_filter({'ipaCertSubject': subject})
result, _truncated = ldap.find_entries(
base_dn=DN(('cn', 'certificates'), ('cn', 'ipa'), ('cn', 'etc'),
base_dn),
filter=filter,
attrs_list=['cn', 'ipaCertSubject', 'ipaCertIssuerSerial',
'ipaPublicKey', 'ipaKeyTrust', 'ipaKeyExtUsage',
'ipaConfigString', 'cACertificate;binary'])
entry = result[0]
dn = entry.dn
for old_cert in entry['cACertificate;binary']:
# Check if we are adding a new cert
if old_cert == dercert:
break
else:
# We are adding a new cert, validate it
if entry.single_value['ipaCertSubject'].lower() != subject.lower():
raise ValueError("subject name mismatch")
if entry.single_value['ipaPublicKey'] != public_key:
raise ValueError("subject public key info mismatch")
entry['ipaCertIssuerSerial'].append(issuer_serial)
entry['cACertificate;binary'].append(dercert)
# Update key trust
if trusted is not None:
old_trust = entry.single_value.get('ipaKeyTrust')
new_trust = 'trusted' if trusted else 'distrusted'
if old_trust is not None and old_trust.lower() != new_trust:
raise ValueError("inconsistent trust")
entry.single_value['ipaKeyTrust'] = new_trust
# Update extended key usage
if trusted is not False:
if ext_key_usage is not None:
old_eku = set(entry.get('ipaKeyExtUsage', []))
old_eku.discard(x509.EKU_PLACEHOLDER)
new_eku = old_eku | ext_key_usage
if not new_eku:
new_eku.add(x509.EKU_PLACEHOLDER)
entry['ipaKeyExtUsage'] = list(new_eku)
else:
entry.pop('ipaKeyExtUsage', None)
# Update configuration flags
is_ipa = False
is_compat = False
for config in entry.get('ipaConfigString', []):
if config.lower() == 'ipaca':
is_ipa = True
elif config.lower() == 'compatca':
is_compat = True
if config_ipa and not is_ipa:
entry.setdefault('ipaConfigString', []).append('ipaCA')
if config_compat and not is_compat:
entry.setdefault('ipaConfigString', []).append('compatCA')
if is_compat or config_compat:
update_compat_ca(ldap, base_dn, dercert)
ldap.update_entry(entry)
clean_old_config(ldap, base_dn, dn, config_ipa, config_compat)
def put_ca_cert(ldap, base_dn, dercert, nickname, trusted=None,
|
Stavitsky/python-neutronclient
|
neutronclient/neutron/v2_0/subnet.py
|
Python
|
apache-2.0
| 9,395
| 0
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import argparse
from oslo.serialization import jsonutils
from neutronclient.common import exceptions
from neutronclient.common import utils
from neutronclient.i18n import _
from neutronclient.neutron import v2_0 as neutronV20
def _format_allocation_pools(subnet):
try:
return '\n'.join([jsonutils.dumps(pool) for pool in
subnet['allocation_pools']])
except (TypeError, KeyError):
return ''
def _format_dns_nameservers(subnet):
try:
return '\n'.join([jsonutils.dumps(server) for server in
subnet['dns_nameservers']])
except (TypeError, KeyError):
return ''
def _format_host_routes(subnet):
try:
return '\n'.join([jsonutils.dumps(route) for route in
subnet['host_routes']])
except (TypeError, KeyError):
return ''
def add_updatable_arguments(parser):
parser.add_argument(
'--name',
help=_('Name of this subnet.'))
gateway_sg = parser.add_mutually_exclusive_group()
gateway_sg.add_argument(
'--gateway', metavar='GATEWAY_IP',
help=_('Gateway IP of this subnet.'))
gateway_sg.add_argument(
'--no-gateway',
action='store_true',
help=_('No distribution of gateway.'))
parser.add_argument(
'--allocation-pool', metavar='start=IP_ADDR,end=IP_ADDR',
action='append', dest='allocation_pools', type=utils.str2dict,
help=_('Allocation pool IP addresses for this subnet '
'(This option can be repeated).'))
parser.add_argument(
'--allocation_pool',
action='append', dest='allocation_pools', type=utils.str2dict,
help=argparse.SUPPRESS)
parser.add_argument(
'--host-route', metavar='destination=CIDR,nexthop=IP_ADDR',
action='append', dest='host_routes', type=utils.str2dict,
help=_('Additional route (This option can be repeated).'))
parser.add_argument(
'--dns-nameserver', metavar='DNS_NAMESERVER',
action='append', dest='dns_nameservers',
help=_('DNS name server for this subnet '
'(This option can be repeated).'))
parser.add_argument(
'--disable-dhcp',
action='store_true',
help=_('Disable DHCP for this subnet.'))
parser.add_argument(
'--enable-dhcp',
action='store_true',
help=_('Enable DHCP for this subnet.'))
# NOTE(ihrachys): yes, that's awful, but should be left as-is for
# backwards compatibility for versions <=2.3.4 that passed the
# boolean values through to the server without any argument
# validation.
parser.add_argument(
'--enable-dhcp=True',
action='store_true',
dest='enable_dhcp',
help=argparse.SUPPRESS)
parser.add_argument(
'--enable-dhcp=False',
action='store_true',
dest='disable_dhcp',
help=argparse.SUPPRESS)
def updatable_args2body(parsed_args, body, for_create=True):
if parsed_args.disable_dhcp and parsed_args.enable_dhcp:
raise exceptions.CommandError(_(
"You cannot enable and disable DHCP at the same time."))
if parsed_args.no_gateway:
body['subnet'].update({'gateway_ip': None})
elif parsed_args.gateway:
body['subnet'].update({'gateway_ip': parsed_args.gateway})
if parsed_args.name:
body['subnet'].update({'name': parsed_args.name})
if parsed_args.disable_dhcp:
body['subnet'].update({'enable_dhcp': False})
if parsed_args.enable_dhcp:
body['subnet'].update({'enable_dhcp': True})
if parsed_args.allocation_pools:
body['subnet']['allocation_pools'] = parsed_args.allocation_pools
if parsed_args.host_routes:
body['subnet']['host_routes'] = parsed_args.host_routes
if parsed_args.dns_nameservers:
body['subnet']['dns_nameservers'] = parsed_args.dns_nameservers
if for_create and parsed_args.ipv6_ra_mode:
if parsed_args.ip_version == 4:
raise exceptions.CommandError(_("--ipv6-ra-mode is invalid "
"when --ip-version is 4"))
body['subnet']['ipv6_ra_mode'] = parsed_args.ipv6_ra_mode
if for_create and parsed_args.ipv6_address_mode:
if parsed_args.ip_version == 4:
raise exceptions.CommandError(_("--ipv6-address-mode is "
"invalid when --ip-version "
"is 4"))
body['subnet']['ipv6_address_mode'] = parsed_args.ipv6_address_mode
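# Rough illustration (not part of the client): the kind of request body the helpers
# above assemble for a typical IPv4 subnet create; every value here is invented.
def _example_subnet_body():
    return {'subnet': {
        'network_id': 'NETWORK_UUID',   # resolved from the NETWORK positional argument
        'ip_version': 4,
        'cidr': '192.0.2.0/24',
        'gateway_ip': '192.0.2.1',      # or None when --no-gateway is given
        'enable_dhcp': True,
        'allocation_pools': [{'start': '192.0.2.10', 'end': '192.0.2.200'}],
        'dns_nameservers': ['198.51.100.53'],
        'host_routes': [{'destination': '203.0.113.0/24', 'nexthop': '192.0.2.254'}],
    }}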
class ListSubnet(neutronV20.ListCommand):
"""List subnets that belong to a given tenant."""
resource = 'subnet'
_formatters = {'allocation_pools': _format_allocation_pools,
|
'dns_nameservers': _format_dns_nameservers,
'host_routes': _format_host_routes, }
list_columns = ['id', 'name', 'cidr', 'allocation_pools']
pagination_support = True
sorting_support = True
class ShowSubnet(neutronV20.ShowCommand):
"""Show information of a given subnet."""
resource = 'subnet'
class CreateSubnet(neutronV20.CreateCommand):
"""Create a subnet for a given tenant."""
resource = 'subnet'
def add_known_arguments(self, parser):
add_updatable_arguments(parser)
parser.add_argument(
'--ip-version',
type=int,
default=4, choices=[4, 6],
help=_('IP version to use, default is 4.'))
parser.add_argument(
'--ip_version',
type=int,
choices=[4, 6],
help=argparse.SUPPRESS)
parser.add_argument(
'network_id', metavar='NETWORK',
help=_('Network ID or name this subnet belongs to.'))
parser.add_argument(
'cidr', nargs='?', metavar='CIDR',
help=_('CIDR of subnet to create.'))
parser.add_argument(
'--ipv6-ra-mode',
choices=['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'],
help=_('IPv6 RA (Router Advertisement) mode.'))
parser.add_argument(
'--ipv6-address-mode',
choices=['dhcpv6-stateful', 'dhcpv6-stateless', 'slaac'],
help=_('IPv6 address mode.'))
parser.add_argument(
'--subnetpool', metavar='SUBNETPOOL',
help=_('ID or name of subnetpool from which this subnet '
'will obtain a CIDR.'))
parser.add_argument(
'--prefixlen', metavar='PREFIX_LENGTH',
help=_('Prefix length for subnet allocation from subnetpool.'))
def args2body(self, parsed_args):
_network_id = neutronV20.find_resourceid_by_name_or_id(
self.get_client(), 'network', parsed_args.network_id)
body = {'subnet': {'network_id': _network_id,
'ip_version': parsed_args.ip_version, }, }
if parsed_args.cidr:
# With subnetpool, cidr is now optional for creating subnet.
cidr = parsed_args.cidr
body['subnet'].update({'cidr': cidr})
unusable_cidr = '/32' if parsed_args.ip_version == 4 else '/128'
if cidr.endswith(unusable_cidr):
self.log.warning(_("An IPv%(ip)d subnet with a %(cidr)s CIDR "
"will have only one usable IP address so "
"the device attached to it will not have "
"any IP connectivity.")
|
NickRuiz/mt-serverland
|
dashboard/api/authentication.py
|
Python
|
bsd-3-clause
| 1,948
| 0.002567
|
'''
Authentication by token for the serverland dashboard Web API.
Project: MT Server Land prototype code
Author: Will Roberts <William.Roberts@dfki.de>
'''
from piston.utils import rc, translate_mime, MimerDataException
from serverland.dashboard.api.models import AuthToken
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
class TokenAuthentication(object):
'''
Token-based authentication for dashboard API access.
Authorized users will have
|
a 4-byte hexadecimal access token; by
passing this value with the key "token" to an API method, the user
will be authenticated.
'''
def is_authenticated(self, request):
'''Determines whether a given HTTP request is authenticated or
not, and sets the request's user field if it is.'''
token = None
# get a token if this is a GET
|
if request.GET and 'token' in request.GET:
token = request.GET['token']
# get a token if this is a POST
if request.POST and 'token' in request.POST:
token = request.POST['token']
# translate mime-types in the request if this is a mime
# message
try:
translate_mime(request)
except MimerDataException:
pass
# check if there's a token in the mime data
if ( hasattr(request, 'data') and
request.data and
'token' in request.data ):
token = request.data['token']
if token:
try:
token = AuthToken.objects.get(auth_token = token)
if token.enabled:
request.user = token.user
return True
except (ObjectDoesNotExist, MultipleObjectsReturned):
pass
return False
def challenge(self):
'''Gives the HTTPResponse returned when is_authenticated
returns False.'''
return rc.FORBIDDEN
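# Client-side sketch (not part of the dashboard code): callers simply present their
# token under the key "token", either as a query parameter on a GET or as a form
# field on a POST. The URL and token value below are hypothetical.
def _example_request_parts():
    base_url = 'http://dashboard.example.org/api/requests/'
    token = 'deadbeef'
    get_url = '%s?token=%s' % (base_url, token)   # picked up from request.GET
    post_data = {'token': token}                  # picked up from request.POST
    return get_url, post_data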
|
wearpants/osf.io
|
website/mails/mails.py
|
Python
|
apache-2.0
| 9,943
| 0.002715
|
# -*- coding: utf-8 -*-
"""OSF mailing utilities.
Email templates go in website/templates/emails
Templates must end in ``.txt.mako`` for plaintext emails or``.html.mako`` for html emails.
You can then create a `Mail` object given the basename of the template and
the email subject. ::
CONFIRM_EMAIL = Mail(tpl_prefix='confirm', subject="Confirm your email address")
You can then use ``send_mail`` to send the email.
Usage: ::
from website import mails
...
mails.send_mail('foo@bar.com', mails.CONFIRM_EMAIL, user=user)
"""
import os
import logging
from mako.lookup import TemplateLookup, Template
from framework.email import tasks
from website import settings
logger = logging.getLogger(__name__)
EMAIL_TEMPLATES_DIR = os.path.join(settings.TEMPLATES_PATH, 'emails')
_tpl_lookup = TemplateLookup(
directories=[EMAIL_TEMPLATES_DIR],
)
TXT_EXT = '.txt.mako'
HTML_EXT = '.html.mako'
class Mail(object):
"""An email object.
:param str tpl_prefix: The template name prefix.
:param str subject: The subject of the email.
:param iterable categories: Categories to add to the email using SendGrid's
|
SMTPAPI. Used for email analytics.
See https://sendgrid.com/docs/User_Guide/Statistics/categories.html
"""
def __init__(self, tpl_prefix, subject, categories=None):
self.tpl_prefix = tpl_prefix
self._subject = subject
self.categories = categories
def html(self, **context):
"""Render the HTML email message."""
tpl_name = self.tpl_prefix + HTML_EXT
return render_message(tpl_name, **context)
def text(self, **context):
"""Render the plaintext email message"""
tpl_name = self.tpl_prefix + TXT_EXT
return render_message(tpl_name, **context)
def subject(self, **context):
return Template(self._subject).render(**context)
def render_message(tpl_name, **context):
"""Render an email message."""
tpl = _tpl_lookup.get_template(tpl_name)
return tpl.render(**context)
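# Quick illustration (a sketch, not part of this module's API surface): subjects are
# Mako templates, so keyword context is interpolated when subject() is called.
def _subject_rendering_example():
    example = Mail('confirm', subject='Hello ${fullname}')
    return example.subject(fullname='Ada Lovelace')   # -> 'Hello Ada Lovelace'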
def send_mail(to_addr, mail, mimetype='plain', from_addr=None, mailer=None,
username=None, password=None, callback=None, **context):
"""Send an email from the OSF.
Example: ::
from website import mails
mails.send_mail('foo@bar.com', mails.TEST, name="Foo")
:param str to_addr: The recipient's email address
:param Mail mail: The mail object
:param str mimetype: Either 'plain' or 'html'
:param function callback: celery task to execute after send_mail completes
:param **context: Context vars for the message template
.. note:
Uses celery if available
"""
from_addr = from_addr or settings.FROM_EMAIL
mailer = mailer or tasks.send_email
subject = mail.subject(**context)
message = mail.text(**context) if mimetype in ('plain', 'txt') else mail.html(**context)
# Don't use ttls and login in DEBUG_MODE
ttls = login = not settings.DEBUG_MODE
logger.debug('Sending email...')
logger.debug(u'To: {to_addr}\nFrom: {from_addr}\nSubject: {subject}\nMessage: {message}'.format(**locals()))
kwargs = dict(
from_addr=from_addr,
to_addr=to_addr,
subject=subject,
message=message,
mimetype=mimetype,
ttls=ttls,
login=login,
username=username,
password=password,
categories=mail.categories,
)
if settings.USE_EMAIL:
if settings.USE_CELERY:
return mailer.apply_async(kwargs=kwargs, link=callback)
else:
ret = mailer(**kwargs)
if callback:
callback()
return ret
# Predefined Emails
TEST = Mail('test', subject='A test email to ${name}', categories=['test'])
EXTERNAL_LOGIN_CONFIRM_EMAIL_CREATE = Mail('external_confirm_create', subject='Open Science Framework Account Verification')
EXTERNAL_LOGIN_CONFIRM_EMAIL_LINK = Mail('external_confirm_link', subject='Open Science Framework Account Verification')
EXTERNAL_LOGIN_LINK_SUCCESS = Mail('external_confirm_success', subject='Open Science Framework Account Verification Success')
INITIAL_CONFIRM_EMAIL = Mail('initial_confirm', subject='Open Science Framework Account Verification')
CONFIRM_EMAIL = Mail('confirm', subject='Add a new email to your OSF account')
CONFIRM_EMAIL_PREREG = Mail('confirm_prereg', subject='Open Science Framework Account Verification, Preregistration Challenge')
CONFIRM_EMAIL_ERPC = Mail('confirm_erpc', subject='Open Science Framework Account Verification, Election Research Preacceptance Competition')
CONFIRM_MERGE = Mail('confirm_merge', subject='Confirm account merge')
REMOVED_EMAIL = Mail('email_removed', subject='Email address removed from your OSF account')
PRIMARY_EMAIL_CHANGED = Mail('primary_email_changed', subject='Primary email changed')
INVITE_DEFAULT = Mail('invite_default', subject='You have been added as a contributor to an OSF project.')
INVITE_PREPRINT = Mail('invite_preprint', subject='You have been added as a contributor to an OSF preprint.')
CONTRIBUTOR_ADDED_DEFAULT = Mail('contributor_added_default', subject='You have been added as a contributor to an OSF project.')
CONTRIBUTOR_ADDED_PREPRINT = Mail('contributor_added_preprint', subject='You have been added as a contributor to an OSF preprint.')
FORWARD_INVITE = Mail('forward_invite', subject='Please forward to ${fullname}')
FORWARD_INVITE_REGISTERED = Mail('forward_invite_registered', subject='Please forward to ${fullname}')
FORGOT_PASSWORD = Mail('forgot_password', subject='Reset Password')
PASSWORD_RESET = Mail('password_reset', subject='Your OSF password has been reset')
PENDING_VERIFICATION = Mail('pending_invite', subject='Your account is almost ready!')
PENDING_VERIFICATION_REGISTERED = Mail('pending_registered', subject='Received request to be a contributor')
REQUEST_EXPORT = Mail('support_request', subject='[via OSF] Export Request')
REQUEST_DEACTIVATION = Mail('support_request', subject='[via OSF] Deactivation Request')
CONFERENCE_SUBMITTED = Mail(
'conference_submitted',
subject='Project created on Open Science Framework',
)
CONFERENCE_INACTIVE = Mail(
'conference_inactive',
subject='Open Science Framework Error: Conference inactive',
)
CONFERENCE_FAILED = Mail(
'conference_failed',
subject='Open Science Framework Error: No files attached',
)
DIGEST = Mail(
'digest', subject='OSF Notifications',
categories=['notifications', 'notifications-digest']
)
TRANSACTIONAL = Mail(
'transactional', subject='OSF: ${subject}',
categories=['notifications', 'notifications-transactional']
)
# Retraction related Mail objects
PENDING_RETRACTION_ADMIN = Mail(
'pending_retraction_admin',
subject='Withdrawal pending for one of your projects.'
)
PENDING_RETRACTION_NON_ADMIN = Mail(
'pending_retraction_non_admin',
subject='Withdrawal pending for one of your projects.'
)
# Embargo related Mail objects
PENDING_EMBARGO_ADMIN = Mail(
'pending_embargo_admin',
subject='Registration pending for one of your projects.'
)
PENDING_EMBARGO_NON_ADMIN = Mail(
'pending_embargo_non_admin',
subject='Registration pending for one of your projects.'
)
# Registration related Mail Objects
PENDING_REGISTRATION_ADMIN = Mail(
'pending_registration_admin',
subject='Registration pending for one of your projects.'
)
PENDING_REGISTRATION_NON_ADMIN = Mail(
'pending_registration_non_admin',
subject='Registration pending for one of your projects.'
)
PENDING_EMBARGO_TERMINATION_ADMIN = Mail(
'pending_embargo_termination_admin',
subject='Request to end an embargo early for one of your projects.'
)
PENDING_EMBARGO_TERMINATION_NON_ADMIN = Mail(
'pending_embargo_termination_non_admin',
subject='Request to end an embargo early for one of your projects.'
)
FILE_OPERATION_SUCCESS = Mail(
'file_operation_success',
subject='Your ${action} has finished',
)
FILE_OPERATION_FAILED = Mail(
'file_operation_failed',
subject='Your ${action} has failed',
)
UNESCAPE = '<% from website.util.sanitize import unescape_entities %> ${unescape_entities(src.title)}'
PROBLEM_REGISTERING
|
sdpython/ensae_teaching_cs
|
src/ensae_teaching_cs/special/image/image_synthese_facette_image.py
|
Python
|
mit
| 2,656
| 0
|
# -*- coding: utf-8 -*-
"""
@file
@brief image and synthesis
"""
from .image_synthese_facette import Rectangle
from .image_synthese_base import Rayon, Couleur
from .image_synthese_sphere import Sphere
class RectangleImage(Rectangle):
"""définit un rectangle contenant un portrait"""
def __init__(self, a, b, c, d, nom_image, pygame, invertx=False):
"""initialisation, si d == None, d est calculé comme étant
le symétrique de b par rapport au milieu du segment [ac],
la texture est un
|
e image,
si invertx == True, inverse l'image selon l'axe des x"""
Rectangle.__init__(self, a, b, c, d, Couleur(0, 0, 0))
self.image = pygame.image.load(nom_image)
self.nom_image = nom_image
self.invertx = invertx
def __str__(self):
"""affichage"""
s = "rectangle image --- a : " + str(self.a)
s += " b : " + str(self.b)
s += " c : " + str(self.c)
s += " d : " + str(self.d)
s += " image : " + self.nom_image
return s
def couleur_point(self, p):
"""retourne la couleur au point de coordonnée p"""
ap = p - self.a
ab = self.b - self.a
ad = self.d - self.a
abn = ab.norme2()
adn = ad.norme2()
x = ab.scalaire(ap) / abn
y = ad.scalaire(ap) / adn
sx, sy = self.image.get_size()
k, li = int(x * sx), int(y * sy)
k = min(k, sx - 1)
li = min(li, sy - 1)
li = sy - li - 1
if not self.invertx:
c = self.image.get_at((k, li))
else:
c = self.image.get_at((sx - k - 1, li))
cl = Couleur(float(c[0]) / 255, float(c[1]) / 255, float(c[2]) / 255)
return cl
class SphereReflet (Sphere):
"""implémente une sphère avec un reflet"""
def __init__(self, centre, rayon, couleur, reflet):
"""initialisation, reflet est un coefficient de réflexion"""
Sphere.__init__(self, centre, rayon, couleur)
self.reflet = reflet
def __str__(self):
"""affichage"""
s = "sphere reflet --- centre : " + str(self.centre)
s += " rayon : " + str(self.rayon)
s += " couleur : " + str(self.couleur)
return s
def rayon_reflechi(self, rayon, p):
"""retourne le rayon réfléchi au point p de la surface,
si aucune, retourne None"""
if p == rayon.origine:
return None
n = self.normale(p, rayon)
n = n.renorme()
y = n.scalaire(rayon.direction)
d = rayon.direction - n * y * 2
r = Rayon(p, d, rayon.pixel, rayon.couleur * self.reflet)
return r
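# Standalone sketch of the reflection formula used in rayon_reflechi above
# (d' = d - 2 (n . d) n, with the normal assumed to be unit length), written
# with plain lists so it can be run on its own.
def reflect(direction, normal):
    dot = sum(di * ni for di, ni in zip(direction, normal))
    return [di - 2.0 * dot * ni for di, ni in zip(direction, normal)]

# A ray going straight down onto a horizontal surface bounces straight back up:
# reflect([0.0, -1.0, 0.0], [0.0, 1.0, 0.0]) == [0.0, 1.0, 0.0]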
|
uber/vertica-python
|
vertica_python/vertica/messages/backend_messages/parameter_status.py
|
Python
|
apache-2.0
| 3,051
| 0.000983
|
# Copyright (c) 2018-2021 Micro Focus or one of its affiliates.
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2013-2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
ParameterStatus message
A ParameterStatus message will be generated whenever the backend believes the
frontend should know about a setting parameter value. For example, when you do
SET SESSION AUTOCOMMIT ON | OFF, you get back a parameter status telling you the
new value of autocommit.
At present Vertica supports a handful of parameters, they are:
standard_conforming_strings, server_version, client_locale, client_label,
long_string_types, protocol_version, auto_commit, MARS
More parameters would be added in the
|
future. Accordingly, a frontend should
simply ignore ParameterStatus for parameters that it does not understand or care
about.
"""
from __future__ import print_function, division, absolute_import
from struct import unpack
from ..message import BackendMessage
class ParameterStatus(BackendMessage):
message_id = b'S'
def __init__(self, data):
BackendMessage.__init__(self)
null_byte = data.find(b'\x00')
unpacked = unpack('{0}sx{1}sx'.format(null_byte, len(data) - null_byte - 2), data)
self.name = unpacked[0].decode('utf-8')
self.value = unpacked[1].decode('utf-8')
def __str__(self):
return "ParameterStatus: {} = {}".format(self.name, self.value)
BackendMessage.register(ParameterStatus)
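# Minimal sketch of the wire format parsed above: the payload is two NUL-terminated
# strings, <name>\0<value>\0. The parameter name and value here are invented.
def _parameter_status_payload_example():
    payload = b'auto_commit\x00on\x00'
    null_byte = payload.find(b'\x00')
    name = payload[:null_byte].decode('utf-8')
    value = payload[null_byte + 1:-1].decode('utf-8')
    return name, value   # -> ('auto_commit', 'on')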
|
exic/spade2
|
xmppd/modules/oob.py
|
Python
|
lgpl-2.1
| 818
| 0.031785
|
# -*- coding: UTF-8 -*-
from xmpp import *
class OOB(PlugIn):
NS = "jabber:iq:oob"
|
def plugin(self,server):
server.Dispatcher.RegisterHandler('iq',self.OOBIqHandler,typ='set',ns="jabber:iq:oob",xmlns=NS_CLIENT)
server.Dispatcher.RegisterHandler('iq',self.OOBIqHandler,typ='result',ns="jabber:iq:oob",xmlns=NS_CLIENT)
server.Dispatcher.RegisterHandler('iq',self.OOBIqHandler,typ='error',ns="jabber:iq:oob",xmlns=NS_CLIENT)
def OOBIqHandler(self, session, stanza):
self.DEBUG("OOB Iq handler called","info")
s = self._owner.getsession(str(stanza['to']))
if s:
# Relay stanza
s.enqueue(stanza)
self.DEBUG("OOB stanza relayed from %s to %s"%(str(session.peer),str(stanza['to'])),"info")
raise NodeProcessed
|
vsoch/singularity-python
|
singularity/analysis/reproduce/utils.py
|
Python
|
agpl-3.0
| 6,015
| 0.002161
|
'''
Copyright (C) 2016-2019 Vanessa Sochat.
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public
License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
from spython.main import Client
from spython.utils import get_singularity_version
from .criteria import (
assess_content,
include_file,
is_root_owned
)
from .levels import get_level
from singularity.logger import bot
import hashlib
import tarfile
import tempfile
import sys
import os
import re
import io
Client.quiet = True
def extract_guts(image_path,
file_filter=None,
tag_root=True,
include_sizes=True):
'''extract the file guts from an image.
Parameters
==========
image_path: can be a tar, a Singularity image (sif) or a sandbox
file_filter: the file filter to extract guts for.
tag_root: if True (default) include if root owned or not.
include_sizes: include content sizes (defaults to True)
'''
if file_filter is None:
file_filter = get_level('IDENTICAL')
|
results = dict()
digest = dict()
allfiles = []
if tag_root:
roots = dict()
if include_sizes:
sizes = dict()
# Option 1: We are given a sandbox
if os.path.isdir(image_path):
sandbox = image_path
# Option 2: it's not a sandbox, and we need to export.
elif 'version 3' in get_singularity_version():
sandbox = Client.export(image_path)
else:
sandbox = Client.image.export(image_path)
# If it's tar, extract
if os.path.isfile(sandbox) and sandbox.endswith('tar'):
with tarfile.open(sandbox) as tar:
sandbox = os.path.join(os.path.dirname(sandbox), 'sandbox')
tar.extractall(path=sandbox)
# Recursively walk through sandbox
for root, dirnames, filenames in os.walk(sandbox):
for filename in filenames:
sandbox_name = os.path.join(root, filename)
# Remove the sandbox base
member_name = sandbox_name.lstrip(sandbox)
allfiles.append(member_name)
included = False
# Skip over directories and symbolic links
if os.path.isdir(sandbox_name) or os.path.islink(sandbox_name):
continue
# If we have flagged to include, and not flagged to skip
elif assess_content(sandbox_name, file_filter):
digest[member_name] = extract_content(sandbox_name, return_hash=True)
included = True
elif include_file(sandbox_name, file_filter):
hasher = hashlib.md5()
with open(sandbox_name, 'rb') as filey:
buf = filey.read()
hasher.update(buf)
digest[member_name] = hasher.hexdigest()
included = True
# Derive size, and if root owned
if included:
if include_sizes:
sizes[member_name] = os.stat(sandbox_name).st_size
if tag_root:
roots[member_name] = is_root_owned(sandbox_name)
results['all'] = allfiles
results['hashes'] = digest
if include_sizes:
results['sizes'] = sizes
if tag_root:
results['root_owned'] = roots
return results
def create_tarfile(source_dir, output_filename=None):
''' create a tarfile from a source directory'''
if output_filename == None:
output_filename = "%s/tmptar.tar" %(tempfile.mkdtemp())
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
return output_filename
def get_image_tar(image_path):
'''get an image tar, either written in memory or to
the file system. file_obj will either be the file object,
or the file itself.
'''
bot.debug('Generate file system tar...')
if 'version 3' in get_singularity_version():
sandbox = Client.export(image_path)
file_obj = create_tarfile(sandbox)
else:
file_obj = Client.image.export(image_path=image_path)
if file_obj is None:
bot.exit("Error generating tar, exiting.")
tar = tarfile.open(file_obj)
return file_obj, tar
def delete_image_tar(file_obj, tar):
'''delete image tar will close a file object (if extracted into
memory) or delete from the file system (if saved to disk)'''
try:
file_obj.close()
except:
tar.close()
if os.path.exists(file_obj):
os.remove(file_obj)
deleted = True
bot.debug('Deleted temporary tar.')
return deleted
def extract_content(member_name, return_hash=False):
'''extract_content will extract content from an image using cat.
If hash=True, a hash sum is returned instead
'''
if return_hash:
hashy = hashlib.md5()
# First try reading regular
try:
with open(member_name, 'r') as filey:
content = filey.read()
except:
# Then try binary
try:
with open(member_name, 'rb') as filey:
content = filey.read()
except:
return None
if not isinstance(content, bytes):
content = content.encode('utf-8')
content = bytes(content)
# If permissions don't allow read, return None
if len(content) == 0:
return None
if return_hash:
hashy.update(content)
return hashy.hexdigest()
return content
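# Usage sketch (the image path is hypothetical): collect the reproducibility "guts"
# of an image and list which of the captured files are owned by root.
if __name__ == '__main__':
    guts = extract_guts('/tmp/container.sif')
    for name, owned_by_root in guts['root_owned'].items():
        if owned_by_root:
            print(name)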
|
fxia22/ASM_xf
|
PythonD/site_python/twisted/test/test_http.py
|
Python
|
gpl-2.0
| 11,674
| 0.002227
|
# Twisted, the Framework of Your Internet
# Copyright (C) 2001 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Test HTTP support."""
from __future__ import nested_scopes
from twisted.trial import unittest
from twisted.protocols import http, loopback
from twisted.internet import protocol
from twisted.test.test_protocols import StringIOWithoutClosing
import string, random
class DateTimeTest(unittest.TestCase):
"""Test date parsing functions."""
def testRoundtrip(self):
for i in range(10000):
time = random.randint(0, 2000000000)
timestr = http.datetimeToString(time)
time2 = http.stringToDatetime(timestr)
self.assertEquals(time, time2)
class OrderedDict:
def __init__(self, dict):
self.dict = dict
self.l = dict.keys()
def __setitem__(self, k, v):
self.l.append(k)
self.dict[k] = v
def __getitem__(self, k):
return self.dict[k]
def items(self):
result = []
for i in self.l:
result.append((i, self.dict[i]))
return result
def __getattr__(self, attr):
return getattr(self.dict, attr)
class DummyHTTPHandler(http.Request):
def process(self):
self.headers = OrderedDict(self.headers)
self.content.seek(0, 0)
data = self.content.read()
length = self.getHeader('content-length')
request = "'''\n"+str(length)+"\n"+data+"'''\n"
self.setResponseCode(200)
self.setHeader("Request", self.uri)
self.setHeader("Command", self.method)
self.setHeader("Version", self.clientproto)
self.setHeader("Content-Length", len(request))
self.write(request)
self.finish()
class LoopbackHTTPClient(http.HTTPClient):
def connectionMade(self):
self.sendCommand("GET", "/foo/bar")
self.sendHeader("Content-Length", 10)
self.endHeaders()
self.transport.write("0123456789")
class HTTP1_0TestCase(unittest.TestCase):
requests = '''\
GET / HTTP/1.0
GET / HTTP/1.1
Accept: text/html
'''
requests = string.replace(requests, '\n', '\r\n')
expected_response = "HTTP/1.0 200 OK\015\012Request: /\015\012Command: GET\015\012Version: HTTP/1.0\015\012Content-length: 13\015\012\015\012'''\012None\012'''\012"
def testBuffer(self):
b = StringIOWithoutClosing()
a = http.HTTPChannel()
a.requestFactory = DummyHTTPHandler
a.makeConnection(protocol.FileWrapper(b))
# one byte at a time, to stress it.
for byte in self.requests:
a.dataReceived(byte)
a.connectionLost(IOError("all one"))
value = b.getvalue()
if value != self.expected_response:
for i in range(len(value)):
if len(self.expected_response) <= i:
print `value[i-5:i+10]`, `self.expected_response[i-5:i+10]`
elif value[i] != self.expected_response[i]:
print `value[i-5:i+10]`, `self.expected_response[i-5:i+10]`
break
print '---VALUE---'
print repr(value)
print '---EXPECTED---'
print repr(self.expected_response)
raise AssertionError
class HTTP1_1TestCase(HTTP1_0TestCase):
requests = '''\
GET / HTTP/1.1
Accept: text/html
POST / HTTP/1.1
Content-Length: 10
0123456789POST / HTTP/1.1
Content-Length: 10
0123456789HEAD / HTTP/1.1
'''
requests = string.replace(requests, '\n', '\r\n')
expected_response = "HTTP/1.1 200 OK\015\012Request: /\015\012Command: GET\015\012Version: HTTP/1.1\015\012Content-length: 13\015\012\015\012'''\012None\012'''\012HTTP/1.1 200 OK\015\012Request: /\015\012Command: POST\015\012Version: HTTP/1.1\015\012Content-length: 21\015\012\015\012'''\01210\0120123456789'''\012HTTP/1.1 200 OK\015\012Request: /\015\012Command: POST\015\012Version: HTTP/1.1\015\012Content-length: 21\015\012\015\012'''\01210\0120123456789'''\012HTTP/1.1 200 OK\015\012Request: /\015\012Command: HEAD\015\012Version: HTTP/1.1\015\012Content-length: 13\015\012\015\012"
class HTTP1_1_close_TestCase(HTTP1_0TestCase):
requests = '''\
GET / HTTP/1.1
Accept: text/html
Connection: close
GET / HTTP/1.0
'''
requests = string.replace(requests, '\n', '\r\n')
expected_response = "HTTP/1.1 200 OK\015\012Connection: close\015\012Request: /\015\012Command: GET\015\012Version: HTTP/1.1\015\012Content-length: 13\015\012\015\012'''\012None\012'''\012"
class HTTP0_9TestCase(HTTP1_0TestCase):
requests = '''\
GET /
'''
requests = string.replace(requests, '\n', '\r\n')
expected_response = "HTTP/1.1 400 Bad Request\r\n\r\n"
class HTTPLoopbackTestCase(unittest.TestCase):
expectedHeaders = {'request' : '/foo/bar',
'command' : 'GET',
'version' : 'HTTP/1.0',
'content-length' : '21'}
numHeaders = 0
gotStatus = 0
gotResponse = 0
gotEndHeaders = 0
def _handleStatus(self, version, status, message):
self.gotStatus = 1
self.assertEquals(version, "HTTP/1.0")
self.assertEquals(status, "200")
def _handleResponse(self, data):
self.gotResponse = 1
self.assertEquals(data, "'''\n10\n0123456789'''\n")
def _handleHeader(self, key, value):
self.numHeaders = self.numHeaders + 1
self.assertEquals(self.expectedHeaders[string.lower(key)], value)
def _handleEndHeaders(self):
self.gotEndHeaders = 1
self.assertEquals(self.numHeaders, 4)
def testLoopback(self):
server = http.HTTPChannel()
server.requestFactory = DummyHTTPHandler
client = LoopbackHTTPClient()
client.handleResponse = self._handleResponse
client.handleHeader = self._handleHeader
client.handleEndHeaders = self._handleEndHeaders
client.handleStatus = self._handleStatus
loopback.loopback(server, client)
if not (self.gotStatus and self.gotResponse and self.gotEndHeaders):
|
raise RuntimeError, "didn't get all callbacks %s" % [self.gotStatus, self.gotResponse, self.gotEndHeaders]
del self.gotEndHeaders
del self.gotResponse
del self.gotStatus
del self.numHeaders
class PRequest:
"""Dummy request for persistence tests."""
def __init__(self, **headers):
self.received_headers = headers
self.headers = {}
def getHeader(self, k):
return self.received_headers.get(k, '')
def setHeader(self, k, v):
self.headers[k] = v
class PersistenceTestCase(unittest.TestCase):
"""Tests for persistent HTTP connections."""
ptests = [#(PRequest(connection="Keep-Alive"), "HTTP/1.0", 1, {'connection' : 'Keep-Alive'}),
(PRequest(), "HTTP/1.0", 0, {'connection': None}),
(PRequest(connection="close"), "HTTP/1.1", 0, {'connection' : 'close'}),
(PRequest(), "HTTP/1.1", 1, {'connection': None}),
(PRequest(), "HTTP/0.9", 0, {'connection': None}),
]
def testAlgorithm(self):
c = http.HTTPChannel()
for req, version, correctResult, resultHeaders in self.ptests:
result = c.checkPersistence(req, version)
self.assertEquals(result, correctResult)
for header in resultHeaders.keys():
self.assertEquals(req.headers.get(header, None), resultHeaders[header])
class ChunkingTestCase(unittest.TestCase):
strings = ["abcv", "", "fdfsd423",
|
hipnusleo/laserjet
|
resource/pypi/cryptography-1.7.1/src/cryptography/hazmat/primitives/asymmetric/utils.py
|
Python
|
apache-2.0
| 2,460
| 0
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import warnings
from pyasn1.codec.der import decoder, encoder
from pyasn1.error import PyAsn1Error
from pyasn1.type import namedtype, univ
import six
from cryptography import utils
from cryptography.hazmat.primitives import hashes
class _DSSSigValue(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('r', univ.Integer()),
namedtype.NamedType('s', univ.Integer())
)
def decode_rfc6979_signature(signature):
warnings.warn(
"decode_rfc6979_signature is deprecated and will "
"be removed in a future version, use decode_dss_signature instead.",
utils.DeprecatedIn10,
stacklevel=2
)
return decode_dss_signature(signature)
def decode_dss_signature(signature):
try:
data, remaining = decoder.decode(signature, asn1Spec=_DSSSigValue())
except PyAsn1Error:
|
raise ValueError("Invalid signature data. Unable to decode ASN.1")
if remaining:
raise ValueError(
"The signature contains bytes after the end of the ASN.1 sequence."
)
r = int(data.getComponentByName('r'))
s = int(data.getComponentByName('s'))
return (r, s)
def encode_rfc6979_signature(r, s):
warnings.warn(
"encode_rfc6979_signature is deprecated and will "
"be removed in a future version, use encode_dss_signature instead.",
utils.DeprecatedIn10,
stacklevel=2
)
return encode_dss_signature(r, s)
def encode_dss_signature(r, s):
if (
not isinstance(r, six.integer_types) or
not isinstance(s, six.integer_types)
):
raise ValueError("Both r and s must be integers")
sig = _DSSSigValue()
sig.setComponentByName('r', r)
sig.setComponentByName('s', s)
return encoder.encode(sig)
class Prehashed(object):
def __init__(self, algorithm):
if not isinstance(algorithm, hashes.HashAlgorithm):
raise TypeError("Expected instance of HashAlgorithm.")
self._algorithm = algorithm
self._digest_size = algorithm.digest_size
digest_size = utils.read_only_property("_digest_size")
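# Round-trip sketch using the two public helpers above; the r and s values are
# made-up small integers (a real pair comes from a DSA/ECDSA signature).
def _dss_signature_roundtrip_example():
    sig = encode_dss_signature(12345, 67890)   # DER-encoded ASN.1 SEQUENCE of r and s
    assert decode_dss_signature(sig) == (12345, 67890)
    return sig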
|
Bootz/multicore-opimization
|
llvm/tools/clang/utils/analyzer/SATestAdd.py
|
Python
|
gpl-3.0
| 2,950
| 0.008136
|
#!/usr/bin/env python
"""
Static Analyzer qualification infrastructure: adding a new project to
the Repository Directory.
Add a new project for testing: build it and add to the Project Map file.
Assumes it's being run from the Repository Directory.
The project directory should be added inside the Repository Directory and
have the same name as the project ID
The project should use the following files for set up:
- pre_run_static_analyzer.sh - prepare the build environment.
Ex: make clean can be a part of it.
- run_static_analyzer.cmd - a list of commands to run through scan-build.
Each command should be on a separate line.
Choose from: configure, make, xcodebuild
"""
import SATestBuild
import os
import csv
import sys
def isExistingProject(PMapFile, projectID) :
PMapReader = csv.reader(PMapFile)
for I in PMapReader:
if projectID == I[0]:
return True
return False
# Add a new project for testing: build it and add to the Project Map file.
# Params:
# Dir is the directory where the sources are.
# ID is a short string used to identify a project.
def addNewProject(ID, IsScanBuild) :
CurDir = os.path.abspath(os.curdir)
Dir = SATestBuild.getProjectDir(ID)
if not os.path.exists(Dir):
print "Error: Project directory is missing: %s" % Dir
sys.exit(-1)
# Build the project.
SATestBuild.testProject(ID, True, IsScanBuild, Dir)
# Add the project ID to the project map.
ProjectMapPath = os.path.join(CurDir, SATestBuild.ProjectMapFile)
if os.path.exists(ProjectMapPath):
PMapFile = open(ProjectMapPath, "r+b")
else:
print "Warning: Creating the Project Map file!!"
PMapFile = open(ProjectMapPath, "w+b")
try:
if (isExistingProject(PMapFile, ID)) :
print >> sys.stdout, 'Warning: Project with ID \'', ID, \
'\' already exists.'
print >> sys.stdout, "Reference output has been regenerated."
else:
PMapWriter = csv.writer(PMapFile)
PMapWriter.writerow( (ID, int(IsScanBuild)) );
print "The project map is updated: ", ProjectMapPath
finally:
PMapFile.close()
# TODO: Add an option not to build.
# TODO: Set the path to the Repository directory.
if __name__ == '__main__':
if len(sys.argv) < 2:
print >> sys.stderr, 'Usage: ', sys.argv[0],\
'project_ID <mode>' \
'mode - 0 for single file project; 1 for scan_build'
sys.exit(-1)
IsScanBuild = 1
if (len(sys.argv) >= 3):
IsScanBuild = int(sys.argv[2])
assert((IsScanBuild == 0) | (IsScanBuild == 1))
addNewProject(sys.argv[1], IsScanBuild)
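# Setup sketch (project name and file contents are hypothetical): the two files the
# docstring above expects inside a new project directory before running
# "python SATestAdd.py MyProj 1". This helper is an illustration, not part of the tool.
def _write_example_setup(project_id='MyProj'):
    if not os.path.isdir(project_id):
        os.mkdir(project_id)
    with open(os.path.join(project_id, 'pre_run_static_analyzer.sh'), 'w') as f:
        f.write('make clean\n')
    with open(os.path.join(project_id, 'run_static_analyzer.cmd'), 'w') as f:
        f.write('configure\nmake\n')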
|
quantumlib/Cirq
|
cirq-google/cirq_google/ops/physical_z_tag_test.py
|
Python
|
apache-2.0
| 1,084
| 0.000923
|
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cirq
import cirq_google
def test_equality():
assert cirq_google.PhysicalZTag() == cirq_google.PhysicalZTag()
assert hash(cirq_google.PhysicalZTag()) == hash(cirq_google.PhysicalZTag())
def test_syc_str_repr():
assert str(cirq_google.PhysicalZTag()) == 'PhysicalZTag()'
assert repr(cirq_google.PhysicalZTag()) == 'cirq_google.PhysicalZTag()'
cirq.testing.assert_equivalent_repr(
cirq_google.PhysicalZTag(), setup_code=('import cirq\nimport cirq_google\n')
)
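# Usage sketch (not one of the original tests): the tag is attached to individual
# operations to request a physical Z from the hardware.
def test_physical_z_tag_usage_sketch():
    q = cirq.LineQubit(0)
    op = cirq.Z(q).with_tags(cirq_google.PhysicalZTag())
    assert cirq_google.PhysicalZTag() in op.tags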
|
jorisvandenbossche/DS-python-data-analysis
|
notebooks/_solutions/case2_biodiversity_processing11.py
|
Python
|
bsd-3-clause
| 133
| 0.015038
|
survey_data_decoupled.groupby(survey_data_decoupled["eventDate"].dt.year).size().plot(kind='barh', color="#00007f", figsize=(10, 10))
|
Havate/havate-openstack
|
proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/admin/info/views.py
|
Python
|
apache-2.0
| 1,045
| 0
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from horizon import tabs
from openstack_dashboard.dashboards.admin.info import tabs as project_tabs
class IndexView(tabs.TabbedTableView):
tab_group_class = project_tabs.SystemInfoTabs
template_name = 'admin/info/index.html'
|
alihalabyah/grab
|
test/spider_mysql_cache.py
|
Python
|
mit
| 346
| 0
|
from unittest import TestCase
from .mixin.spider_cache import SpiderCacheMixin
class BasicSpiderTestCase(TestCase, SpiderCacheMixin):
def setUp(self):
        SpiderCacheMixin.setUp(self)
def setup_cache(self, bot):
bot.setup_cache(backend='mysql', database='spider_test',
                        user='web', passwd='web-**')
|
astooke/synkhronos
|
tests/get_set_value_lengths.py
|
Python
|
mit
| 578
| 0
|
import synkhronos as synk
import numpy as np
import theano
synk.fork()
s = theano.shared(np.ones([5, 5], dtype='float32'), name="shared_var")
s2 = theano.shared(np.ones([4, 4], dtype='float32'), name="shared_var_2")
f = synk.function([], [s.dot(s), s2.dot(s2)])
synk.distribute()
# print(f())
# print(synk.get_value(1, s))
# d = 2 * np.ones([5, 5], dtype='float32')
# synk.set_value(1, s, d)
d55 = np.array(list(range(5 * 5)), dtype='float32').reshape(5, 5)
d64 = np.array(list(range(6 * 4)), dtype='float32').reshape(6, 4)
# (run interactive in iPython for setup)
|
to266/hyperspy
|
examples/hyperspy_as_library/minimal_example.py
|
Python
|
gpl-3.0
| 280
| 0.003571
|
""" Loads hyperspy as a regular python library, creates a spectrum with random numbers and
|
plots it to a file"""
import hyperspy.api as hs
import numpy as np
import matplotlib.pyplot as plt
s = hs.signals.Spectrum(np.random.rand(1024))
s.plot()
plt.savefig("testS
|
pectrum.png")
|
Lothiraldan/ZeroServices
|
zeroservices/exceptions.py
|
Python
|
mit
| 434
| 0.009217
|
class ServiceUnavailable(Exception):
pass
class UnknownNode(Exception):
pass
class UnknownService(Exception):
pass
class ResourceException(Exception):
def __init__(self, error_message):
        self.error_message = error_message
def __str__(self):
return self.__repr__()
def __repr__(self):
return "ResourceException(%s)" % self.error_message
class ResourceNotFound(Exception):
    pass
|
ibayer/fastFM-fork
|
fastFM/bpr.py
|
Python
|
bsd-3-clause
| 2,859
| 0
|
# Author: Immanuel Bayer
# License: BSD 3 clause
import ffm
import numpy as np
from .base import FactorizationMachine
from sklearn.utils.testing import assert_array_equal
from .validation import check_array, assert_all_finite
class FMRecommender(FactorizationMachine):
""" Factorization Machine Recommender with pairwise (BPR) loss solver.
Parameters
----------
n_iter : int, optional
        The number of iterations of individual samples.
init_stdev: float, optional
Sets the stdev for the initialization of the parameter
random_state: int, optional
The seed of the pseudo random number generator that
initializes the parameters and mcmc chain.
rank: int
The rank of the factorization used for the second order interactions.
    l2_reg_w : float
        L2 penalty weight for linear coefficients.
    l2_reg_V : float
        L2 penalty weight for pairwise coefficients.
l2_reg : float
L2 penalty weight for all coefficients (default=0).
step_size : float
Stepsize for the SGD solver, the solver uses a fixed step size and
        might require tuning of the number of iterations `n_iter`.
Attributes
---------
w0_ : float
bias term
w_ : float | array, shape = (n_features)
Coefficients for linear combination.
V_ : float | array, shape = (rank_pair, n_features)
Coefficients of second order factor matrix.
"""
def __init__(self, n_iter=100, init_stdev=0.1, rank=8, random_state=123,
l2_reg_w=0.1, l2_reg_V=0.1, l2_reg=0, step_size=0.1):
super(FMRecommender, self).\
__init__(n_iter=n_iter, init_stdev=init_stdev, rank=rank,
random_state=random_state)
if (l2_reg != 0):
self.l2_reg_V = l2_reg
self.l2_reg_w = l2_reg
else:
self.l2_reg_w = l2_reg_w
self.l2_reg_V = l2_reg_V
self.step_size = step_size
self.task = "ranking"
def fit(self, X, pairs):
""" Fit model with specified loss.
Parameters
----------
X : scipy.sparse.csc_matrix, (n_samples, n_features)
y : float | ndarray, shape = (n_compares, 2)
Each row `i` defines a pair of samples such that
the first returns a
|
high value then the second
|
FM(X[i,0]) > FM(X[i, 1]).
"""
X = X.T
X = check_array(X, accept_sparse="csc", dtype=np.float64)
assert_all_finite(pairs)
pairs = pairs.astype(np.float64)
# check that pairs contain no real values
assert_array_equal(pairs, pairs.astype(np.int32))
assert pairs.max() <= X.shape[1]
assert pairs.min() >= 0
self.w0_, self.w_, self.V_ = ffm.ffm_fit_sgd_bpr(self, X, pairs)
return self
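# Usage sketch (added for illustration; the data below is made up and not part of
# fastFM). Each row of `pairs` holds two sample indices, the first of which should
# be ranked above the second.
if __name__ == "__main__":
    import numpy as np
    import scipy.sparse as sp
    rng = np.random.RandomState(0)
    X_demo = sp.csc_matrix(rng.rand(6, 4))  # 6 samples, 4 features
    pairs_demo = np.array([[0, 1], [2, 3], [4, 5]], dtype=np.float64)
    fm = FMRecommender(n_iter=200, rank=2, step_size=0.05)
    fm.fit(X_demo, pairs_demo)  # learns fm.w0_, fm.w_ and fm.V_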
|
saschpe/gnome_picross
|
gnomepicross/game.py
|
Python
|
gpl-2.0
| 5,836
| 0.035127
|
#!/usr/bin/env python
#
# Copyright (C) 2007 Sascha Peilicke <sasch.pe@gmx.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
from random import randrange
from zipfile import ZipFile
from StringIO import StringIO
# Constants
DEFAULT_LEVELPACK = './data/default_pack.zip'
SKILL_EASY = 'Easy' # These values should match the
SKILL_MEDIUM = 'Medium' # the level files!
SKILL_HARD = 'Hard'
FIELD_INVALID = 0 # Constants describing a field on
FIELD_VALID = 1 # the playfield
FIELD_MARKED_VALID = 2
FIELD_MARKED_INVALID = 4
FIELD_OPEN = 8
class Game(object):
"""A paint by numbers game also called nonogram.
"""
def __init__(self, skill=None):
"""Creates a picross game.
Parameters:
skill - Desired skill level (None == random)
"""
self.__level = None
self.__name = None
self.__skill = None
self.__fieldsToOpen = 0
self.__fieldsOpened = 0
self.load(skill=skill)
#
# Miscellaneous methods
#
def _debug_print(self):
print self.getInfo()
print 'go: %s' % (self.__gameOver)
for row in self.__level:
print row
#
# Game information retrieval
#
def getInfo(self):
"""Returns the name, skill and size of the level
"""
return self.__name,self.__skill,len(self.__level)
def getRowHint(self,row):
"""Returns the hint for a specific row.
"""
hint,count = [],0
for columnItem in self.__level[row]:
if columnItem == FIELD_VALID:
count += 1
else:
if count > 0:
hint.append(count)
count = 0
if count > 0:
hint.append(count)
if not hint:
hint.append(0)
return hint
def getColumnHint(self,col):
"""Returns the hint for a specific column.
"""
hint,count = [],0
        for row in self.__level:
if row[col] == FIELD_VALID:
count += 1
else:
if count > 0:
hint.append(count)
count = 0
if count > 0:
hint.append(count)
if not hint:
hint.append(0)
        return hint
def getField(self,col,row):
return self.__level[row][col]
def isGameWon(self):
return self.__fieldsOpened == self.__fieldsToOpen
#
# Game manipulation methods
#
def restart(self):
"""Reinitializes the current game
"""
for i, row in enumerate(self.__level):
for j, field in enumerate(row):
if field == FIELD_OPEN or field == FIELD_MARKED_VALID:
self.__level[i][j] = FIELD_VALID
elif field == FIELD_MARKED_INVALID:
self.__level[i][j] = FIELD_INVALID
self.__gameOver = False
self.__fieldsOpened = 0
def openField(self,col,row):
field = self.__level[row][col]
if field == FIELD_VALID or field == FIELD_MARKED_VALID:
self.__level[row][col] = FIELD_OPEN
self.__fieldsOpened += 1
return True
else:
return False
def markField(self,col,row):
field = self.__level[row][col]
if field == FIELD_VALID:
self.__level[row][col] = FIELD_MARKED_VALID
elif field == FIELD_MARKED_VALID:
self.__level[row][col] = FIELD_VALID
elif field == FIELD_INVALID:
self.__level[row][col] = FIELD_MARKED_INVALID
elif field == FIELD_MARKED_INVALID:
self.__level[row][col] = FIELD_INVALID
return self.__level[row][col]
def load(self,file=DEFAULT_LEVELPACK,skill=None):
"""Loads a level either from a zipped levelpack or from a textfile.
Parameters:
file - Can be a file path or zipped levelpack
skill - Desired level skill (None == random)
"""
if file.endswith('.lvl'):
# Set the skill variable
if file.startswith('easy'): self.__skill = SKILL_EASY
elif file.startswith('medium'): self.__skill = SKILL_MEDIUM
elif file.startswith('hard'): self.__skill = SKILL_HARD
self.__loadFileContent(open(file,'r'))
elif file.endswith('.zip'):
zip = ZipFile(file)
# We have to select from which files in the zipfile we
# want to choose randomly based on the level's skill
candidates = []
if skill == SKILL_EASY:
for file in zip.namelist():
if file.startswith('easy'):
candidates.append(file)
elif skill == SKILL_MEDIUM:
for file in zip.namelist():
if file.startswith('medium'):
candidates.append(file)
elif skill == SKILL_HARD:
for file in zip.namelist():
if file.startswith('hard'):
candidates.append(file)
# This should never happen in a good levelpack, but if it
# is malformed, just pick something!
if not candidates:
candidates = zip.namelist()
# Select one candidate randomly
which = candidates[randrange(len(candidates))]
# Set the skill variable
if which.startswith('easy'): self.__skill = SKILL_EASY
elif which.startswith('medium'):self.__skill = SKILL_MEDIUM
elif which.startswith('hard'): self.__skill = SKILL_HARD
# Read from zipfile and load file content
buf = zip.read(which)
self.__loadFileContent(StringIO(buf))
def __loadFileContent(self,file):
"""Actually loads the level data from a file.
"""
self.__level = []
for line in file:
if line.startswith('name:'):
self.__name = line[5:].strip()
elif line[0] == '0' or line[0] == '1':
row = []
for field in line:
if field == '0':
row.append(FIELD_INVALID)
elif field == '1':
self.__fieldsToOpen += 1
row.append(FIELD_VALID)
self.__level.append(row)
|
valentin-krasontovitsch/ansible
|
lib/ansible/modules/web_infrastructure/ansible_tower/tower_job_template.py
|
Python
|
gpl-3.0
| 10,716
| 0.001027
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_job_template
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower job template.
description:
- Create, update, or destroy Ansible Tower job templates. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the job template.
required: True
description:
description:
- Description to use for the job template.
job_type:
description:
- The job type to use for the job template.
required: True
choices: ["run", "check", "scan"]
inventory:
description:
- Name of the inventory to use for the job template.
project:
description:
- Name of the project to use for the job template.
required: True
playbook:
description:
- Path to the playbook to use for the job template within the project provided.
required: True
credential:
description:
- Name of the credential to use for the job template.
version_added: 2.7
vault_credential:
description:
- Name of the vault credential to use for the job template.
version_added: 2.7
forks:
description:
- The number of parallel or simultaneous processes to use while executing the playbook.
limit:
description:
- A host pattern to further constrain the list of hosts managed or affected by the playbook
verbosity:
description:
- Control the output level Ansible produces as the playbook runs. 0 - Normal, 1 - Verbose, 2 - More Verbose, 3 - Debug, 4 - Connection Debug.
choices: [0, 1, 2, 3, 4]
default: 0
extra_vars_path:
description:
- Path to the C(extra_vars) YAML file.
job_tags:
description:
- Comma separated list of the tags to use for the job template.
force_handlers_enabled:
description:
- Enable forcing playbook handlers to run even if a task fails.
version_added: 2.7
type: bool
default: 'no'
skip_tags:
description:
- Comma separated list of the tags to skip for the job template.
start_at_task:
description:
- Start the playbook at the task matching this name.
version_added: 2.7
fact_caching_enabled:
description:
- Enable use of fact caching for the job template.
version_added: 2.7
type: bool
default: 'no'
host_config_key:
description:
- Allow provisioning callbacks using this host config key.
ask_diff_mode:
description:
- Prompt user to enable diff mode (show changes) to files when supported by modules.
version_added: 2.7
type: bool
default: 'no'
ask_extra_vars:
description:
- Prompt user for (extra_vars) on launch.
type: bool
default: 'no'
ask_limit:
description:
- Prompt user for a limit on launch.
version_added: 2.7
type: bool
default: 'no'
ask_tags:
description:
- Prompt user for job tags on launch.
type: bool
default: 'no'
ask_skip_tags:
description:
- Prompt user for job tags to skip on launch.
version_added: 2.7
type: bool
default: 'no'
ask_job_type:
description:
- Prompt user for job type on launch.
type: bool
default: 'no'
ask_verbosity:
description:
- Prompt user to choose a verbosity level on launch.
version_added: 2.7
type: bool
default: 'no'
ask_inventory:
description:
        - Prompt user for inventory on launch.
type: bool
default: 'no'
ask_credential:
description:
- Prompt user for credential on launch.
type: bool
default: 'no'
survey_enabled:
description:
- Enable a survey on the job template.
version_added: 2.7
type: bool
default: 'no'
survey_spec:
description:
- JSON/YAML dict formatted survey definition.
version_added: 2.8
type: dict
required: False
become_enabled:
description:
- Activate privilege escalation.
type: bool
default: 'no'
concurrent_jobs_enabled:
description:
- Allow simultaneous runs of the job template.
version_added: 2.7
type: bool
default: 'no'
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
extends_documentation_fragment: tower
notes:
- JSON for survey_spec can be found in Tower API Documentation. See
U(https://docs.ansible.com/ansible-tower/latest/html/towerapi/api_ref.html#/Job_Templates/Job_Templates_job_templates_survey_spec_create)
for POST operation payload example.
'''
EXAMPLES = '''
- name: Create tower Ping job template
tower_job_template:
name: "Ping"
job_type: "run"
inventory: "Local"
project: "Demo"
playbook: "ping.yml"
credential: "Local"
state: "present"
tower_config_file: "~/tower_cli.cfg"
survey_enabled: yes
survey_spec: "{{ lookup('file', 'my_survey.json') }}"
'''
from ansible.module_utils.ansible_tower import TowerModule, tower_auth_config, tower_check_mode
try:
import tower_cli
import tower_cli.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def update_fields(p):
'''This updates the module field names
to match the field names tower-cli expects to make
calling of the modify/delete methods easier.
'''
params = p.copy()
field_map = {
'fact_caching_enabled': 'use_fact_cache',
'ask_diff_mode': 'ask_diff_mode_on_launch',
'ask_extra_vars': 'ask_variables_on_launch',
'ask_limit': 'ask_limit_on_launch',
'ask_tags': 'ask_tags_on_launch',
'ask_skip_tags': 'ask_skip_tags_on_launch',
'ask_verbosity': 'ask_verbosity_on_launch',
'ask_inventory': 'ask_inventory_on_launch',
'ask_credential': 'ask_credential_on_launch',
'ask_job_type': 'ask_job_type_on_launch',
'diff_mode_enabled': 'diff_mode',
'concurrent_jobs_enabled': 'allow_simultaneous',
'force_handlers_enabled': 'force_handlers',
}
params_update = {}
for old_k, new_k in field_map.items():
v = params.pop(old_k)
params_update[new_k] = v
extra_vars = params.get('extra_vars_path')
if extra_vars is not None:
params_update['extra_vars'] = ['@' + extra_vars]
params.update(params_update)
return params
def update_resources(module, p):
params = p.copy()
identity_map = {
'project': 'name',
'inventory': 'name',
'credential': 'name',
'vault_credential': 'name',
}
for k, v in identity_map.items():
try:
if params[k]:
key = 'credential' if '_credential' in k else k
result = tower_cli.get_resource(key).get(**{v: params[k]})
params[k] = result['id']
elif k in params:
# unset empty parameters to avoid ValueError: invalid literal for int() with base 10: ''
del(params[k])
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False)
return params
def main():
argument_spec = dict(
name=dict(required=True),
description=dict(default=''),
job_type=dict(choices=['run', 'check', 'scan'], required=True),
inventory=dict(default=''),
project=dict(required=True),
playbook=dict(required=
|
tgquintela/Mscthesis
|
script_computation.py
|
Python
|
mit
| 836
| 0.008373
|
from FirmsLocations.Computers.precomputers import PrecomputerCollection
from FirmsLocations.Computers.computers import Directmodel, LocationOnlyModel,\
LocationGeneralModel
## Path parameters
# Set path parameters
execfile('set_pathparameters.py')
# Set precomputation parameters
execfile('set_precomputationparameters.py')
# Set computation paramters
execfile('set_computationparameters.py')
### Data precomputation
precomps = PrecomputerCollection(logfile, pathfolder, old_computed=True)
### Models
#dirmodel = Directmodel(logfile, pathfolder, precomps, num_cores=2)
#dirmodel.compute(pars_directmodel)
locmodel = LocationOnlyModel(logfile, pathfolder, precomps, num_cores=1)
locmodel.compute(pars_loconly_model)
#
#locgeneralmodel = LocationGeneralModel(logfile, pathfolder, precomps)
#locgeneralmodel.compute(pars_loc_model)
|
bowen0701/algorithms_data_structures
|
lc0695_max_area_of_island.py
|
Python
|
bsd-2-clause
| 4,625
| 0.045622
|
"""Leetcode 695. Max Area of Island
Medium
URL: https://leetcode.com/problems/max-area-of-island/
Given a non-empty 2D array grid of 0's and 1's, an island is a group of 1's
(representing land) connected 4-directionally (horizontal or vertical.)
You may assume all four edges of the grid are surrounded by water.
Find the maximum area of an island in the given 2D array.
(If there is no island, the maximum area is 0.)
Example 1:
[[0,0,1,0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,1,1,0,1,0,0,0,0,0,0,0,0],
[0,1,0,0,1,1,0,0,1,0,1,0,0],
[0,1,0,0,1,1,0,0,1,1,1,0,0],
[0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,0,1,1,0,0,0,0]]
Given the above grid, return 6. Note the answer is not 11,
because the island must be connected 4-directionally.
Example 2:
[[0,0,0,0,0,0,0,0]]
Given the above grid, return 0.
Note: The length of each dimension in the given grid does not exceed 50.
"""
class SolutionDFSRecurUpdate(object):
def _dfs(self, r, c, grid):
# Check exit conditions: out of boundaries, in water.
if (r < 0 or r >= len(grid) or c < 0 or c >= len(grid[0]) or
grid[r][c] == 0):
return 0
# Mark (r, c) as visited.
grid[r][c] = 0
area = 1
# Visit 4 directions to accumulate area.
dirs = [(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)]
for r_next, c_next in dirs:
area += self._dfs(r_next, c_next, grid)
return area
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
Time complexity: O(m*n).
Space complexity: O(m*n).
"""
if not grid or not grid[0]:
return 0
max_area = 0
for r in range(len(grid)):
for c in range(len(grid[0])):
if grid[r][c] == 1:
area = self._dfs(r, c, grid)
max_area = max(max_area, area)
return max_area
class SolutionDFSIterUpdate(object):
def _get_tovisit_ls(self, v_start, grid):
r, c = v_start
tovisit_ls = []
# Visit up, down, left and right.
dirs = [(r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)]
for r_next, c_next in dirs:
if (0 <= r_next < len(grid) and
0 <= c_next < len(grid[0]) and
grid[r_next][c_next] == 1):
tovisit_ls.append((r_next, c_next))
return tovisit_ls
def _dfs(self, r, c, grid):
grid[r][c] = 0
# Use stack for DFS.
stack = [(r, c)]
area = 1
while stack:
# Get to-visit nodes from the top of stack.
tovisit_ls = self._get_tovisit_ls(stack[-1], grid)
if tovisit_ls:
for r_next, c_next in tovisit_ls:
grid[r_next][c_next] = 0
area += 1
stack.append((r_next, c_next))
# Break to continue DFS.
break
else:
stack.pop()
return area
def maxAreaOfIsland(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
Time complexity: O(m*n).
Space complexity: O(m*n).
"""
if not grid or not grid[0]:
return 0
max_area = 0
for r in range(len(grid)):
for c in range(len(grid[0])):
if grid[r][c] == 1:
area = self._dfs(r, c, grid)
max_area = max(max_area, area)
return max_area
def main():
# Output: 6
grid = [[0,0,1,0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,1,1,0,1,0,0,0,0,0,0,0,0],
[0,1,0,0,1,1,0,0,1,0,1,0,0],
[0,1,0,0,1,1,0,0,1,1,1,0,0],
[0,0,0,0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,0,0,0,0,0,0,1,1,0,0,0,0]]
print SolutionDFSRecurUpdate().maxAreaOfIsland(grid)
grid = [[0,0,1,0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,0,1,1,1,0,0,0],
[0,1,1,0,1,0,0,0,0,0,0,0,0],
[0,1,0,0,1,1,0,0,1,0,1,0,0],
[0,1,0,0,1,1,0,0,1,1,1,0,0],
            [0,0,0,0,0,0,0,0,0,0,1,0,0],
            [0,0,0,0,0,0,0,1,1,1,0,0,0],
            [0,0,0,0,0,0,0,1,1,0,0,0,0]]
    print SolutionDFSIterUpdate().maxAreaOfIsland(grid)
# Output: 0.
grid = [[0,0,0,0,0,0,0,0]]
print SolutionDFSRecurUpdate().maxAreaOfIsland(grid)
grid = [[0,0,0,0,0,0,0,0]]
print SolutionDFSIterUpdate().maxAreaOfIsland(grid)
if __name__ == '__main__':
main()
|
abrt/faf
|
src/pyfaf/actions/addcompathashes.py
|
Python
|
gpl-3.0
| 5,960
| 0.001174
|
# Copyright (C) 2014 ABRT Team
# Copyright (C) 2014 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
from pyfaf.actions import Action
from pyfaf.common import FafError
from pyfaf.problemtypes import problemtypes
from pyfaf.queries import get_reports_by_type, get_report
from pyfaf.storage import ReportHash
from pyfaf.utils.hash import hash_list
class AddCompatHashes(Action):
name = "addcompathashes"
def _unmap_offset(self, offset) -> int:
if offset < 0:
offset += 1 << 63
return offset
def _hash_backtrace(self, db_backtrace, hashbase=None, offset=False) -> str:
if hashbase is None:
hashbase = []
crashthreads = [t for t in db_backtrace.threads if t.crashthread]
if not crashthreads:
raise FafError("No crash thread found")
if len(crashthreads) > 1:
raise FafError("Multiple crash threads found")
frames = [f for f in crashthreads[0].frames if not f.inlined][:16]
has_names = all(f.symbolsource.symbol is not None and
f.symbolsource.symbol.name is not None and
f.symbolsource.symbol.name != "??" for f in frames)
has_hashes = all(f.symbolsource.hash is not None for f in frames)
# use function names if available
if has_names:
# also hash offset for reports that use it as line numbers
# these reports always have function names
if offset:
hashbase.extend(["{0} @ {1} + {2}"
.format(f.symbolsource.symbol.name,
f.symbolsource.path,
f.symbolsource.offset) for f in frames])
else:
hashbase.extend(["{0} @ {1}"
.format(f.symbolsource.symbol.name,
f.symbolsource.path) for f in frames])
# fallback to hashes
elif has_hashes:
hashbase.extend(["{0} @ {1}"
.format(f.symbolsource.hash,
f.symbolsource.path) for f in frames])
else:
raise FafError("either function names or hashes are required")
return hash_list(hashbase)
def run(self, cmdline, db) -> int:
        if cmdline.problemtype is None or not cmdline.problemtype:
ptypes = list(problemtypes.keys())
else:
ptypes = []
for ptype in cmdline.problemtype:
if ptype not in problemtypes:
self.log_warn("Problem type '{0}' is not supported"
.format(ptype))
continue
ptypes.append(ptype)
if not ptypes:
self.log_info("Nothing to do")
return 1
for i, ptype in enumerate(ptypes, start=1):
problemtype = problemtypes[ptype]
self.log_info("[{0} / {1}] Processing problem type '{2}'"
.format(i, len(ptypes), problemtype.nice_name))
db_reports = get_reports_by_type(db, ptype)
for j, db_report in enumerate(db_reports, start=1):
self.log_info(" [{0} / {1}] Processing report #{2}"
.format(j, len(db_reports), db_report.id))
hashes = set()
for k, db_backtrace in enumerate(db_report.backtraces, start=1):
self.log_debug("\t[%d / %d] Processing backtrace #%d",
k, len(db_report.backtraces), db_backtrace.id)
try:
component = db_report.component.name
include_offset = ptype.lower() == "python"
bthash = self._hash_backtrace(db_backtrace,
hashbase=[component],
offset=include_offset)
self.log_debug("\t%s", bthash)
db_dup = get_report(db, bthash)
if db_dup is None:
self.log_info(" Adding hash '{0}'"
.format(bthash))
if not bthash in hashes:
db_reporthash = ReportHash()
db_reporthash.report = db_report
db_reporthash.hash = bthash
db.session.add(db_reporthash)
hashes.add(bthash)
elif db_dup == db_report:
self.log_debug("\tHash '%s' already assigned", bthash)
else:
self.log_warn((" Conflict! Skipping hash '{0}'"
" (report #{1})").format(bthash,
db_dup.id))
except FafError as ex:
self.log_warn(" {0}".format(str(ex)))
continue
db.session.flush()
return 0
def tweak_cmdline_parser(self, parser) -> None:
parser.add_problemtype(multiple=True)
|
jeremyphilemon/uniqna
|
api/tests/test_models.py
|
Python
|
bsd-3-clause
| 459
| 0.021786
|
from django.test import TestCase
from api.models import UsernameSnippet
class TestUsernameSnippet(TestCase):
@classmethod
def setUpTestData(cls):
UsernameSnippet.objects.create(available=True)
def test_existence(self):
        u = UsernameSnippet.objects.first()
self.assertIsInstance(u, UsernameSnippet)
self.assertEqual(u.available, True)
def test_field_types(self):
u = UsernameSnippet.objects.first()
self.assertIsInstance(u.available, bool)
|
clolsonus/madesigner
|
madesigner/madlib/contour.py
|
Python
|
gpl-3.0
| 28,367
| 0.007791
|
#!python
__author__ = "Curtis L. Olson < curtolson {at} flightgear {dot} org >"
__url__ = "http://gallinazo.flightgear.org"
__version__ = "1.0"
__license__ = "GPL v2"
import fileinput
import math
import string
from . import spline
import Polygon
import Polygon.Shapes
import Polygon.Utils
class Cutpos:
def __init__(self, percent=None, front=None, rear=None, xpos=None,
xoffset=0.0, atstation=None, slope=None):
self.percent = percent # placed at % point in chord
self.front = front # dist from front of chord
self.rear = rear # dist from rear of chord
self.xpos = xpos # abs position
# if xoffset is provided, nudge the x position by this amount after
# computing the relative position the normal way
self.xoffset = xoffset
# if atstation + slope are defined, then the cut position will
# be just like any other cut position at the 'root' station,
# but offset by dist+slope for any other station. This allows
# straight stringers or aileron cut at arbitray angles
# relative to the rest of the wing.
self.atstation = atstation
self.slope = slope
# move the cutpos by dist amount
def move(self, xoffset=0.0):
self.xoffset += xoffset
class Cutout:
def __init__(self, surf="top", orientation="tangent", cutpos=None, \
xsize=0.0, ysize=0.0):
# note: specify a value for only one of percent, front, rear, or xpos
self.surf = surf # {top, bottom}
self.orientation = orientation # {tangent, vertical}
self.xsize = xsize # horizontal size
self.ysize = ysize # vertical size
self.cutpos = cutpos # Cutpos()
class Contour:
def __init__(self):
self.name = ""
self.description = ""
self.top = []
self.bottom = []
self.poly = None
self.cut_lines = [] # extra cut lines (maybe internal)
self.labels = []
self.saved_bounds = [] # see self.save_bounds() for details
def dist_2d(self, pt1, pt2):
result = 0.0
if pt1[0] != None and pt1[1] != None:
dx = pt2[0]-pt1[0]
dy = pt2[1]-pt1[1]
result = math.sqrt(dx*dx + dy*dy)
return result
def simple_interp(self, points, v):
if v < points[0][0]:
return None
if v > points[len(points)-1][0]:
return None
index = spline.binsearch(points, v)
n = len(points) - 1
if index < n:
xrange = points[index+1][0] - points[index][0]
yrange = points[index+1][1] - points[index][1]
# print(" xrange = $xrange\n")
if xrange > 0.0001:
percent = (v - points[index][0]) / xrange
# print(" percent = $percent\n")
return points[index][1] + percent * yrange
else:
return points[index][1]
else:
return points[index][1]
def poly_intersect(self, surf="top", xpos=0.0):
if self.poly == None:
self.make_poly()
ymin = None
ymax = None
#print "poly contours = " + str(len(self.poly))
for index, contour in enumerate(self.poly):
#print "contour " + str(index) + " = " + str(contour)
p0 = contour[len(contour)-1]
for p1 in contour:
#print " p1 = " + str(p1) + " xpos = " + str(xpos)
if p0 != None:
if p0[0] < p1[0]:
a = p0
b = p1
else:
a = p1
b = p0
if a[0] <= xpos and b[0] >= xpos:
#print "found a spanning segment!"
# a & b span xpos
xrange = b[0] - a[0]
yrange = b[1] - a[1]
if xrange > 0.0001:
percent = (xpos - a[0]) / xrange
ypos= a[1] + percent * yrange
else:
ypos = a[1]
if ymin == None or ypos < ymin:
ymin = ypos
if ymax == None or ypos > ymax:
ymax = ypos
p0 = p1
if surf == "top":
return ymax
else:
return ymin
def fit(self, maxpts = 30, maxerror = 0.1):
self.top = list( self.curve_fit(self.top, maxpts, maxerror) )
self.bottom = list( self.curve_fit(self.bottom, maxpts, maxerror) )
def curve_fit(self, curve, maxpts = 30, maxerror = 0.1):
wip = []
# start with the end points
n = len(curve)
wip.append( curve[0] )
wip.append( curve[n-1] )
# iterate until termination conditions are met
done = False
while not done:
maxy = 0
maxx = 0
maxdiff = 0
maxi = -1
# iterate over the orginal interior points
for i in range(1, n-1):
pt = curve[i]
iy = self.simple_interp(wip, pt[0])
diff = math.fabs(pt[1] - iy)
if diff > maxdiff and diff > maxerror:
maxdiff = diff
maxi = i
maxx = pt[0]
maxy = pt[1]
if maxi > -1:
# found a match for a furthest off point
#print "($#wipx) inserting -> $maxx , $maxy at pos ";
# find insertion point
pos = 0
wipn = len(wip)
#print str(pos) + " " + str(wipn)
while pos < wipn and maxx > wip[pos][0]:
pos += 1
#print pos
#print "$pos\n";
wip.insert( pos, (maxx, maxy) )
else:
done = True
if len(wip) >= maxpts:
done = True
return wip
def display(self):
tmp = list(self.top)
tmp.reverse()
for pt in tmp:
            print(str(pt[0]) + " " + str(pt[1]))
for pt in self.bottom:
print(str(pt[0]) + " " + str(pt[1]))
# rotate a point about (0, 0)
def rotate_point( self, pt, angle ):
rad = math.radians(angle)
newx = pt[0] * math.cos(rad) - pt[1] * math.sin(rad)
newy = pt[1] * math.cos(rad) + pt[0] * math.sin(rad)
return (newx, newy)
    def rotate(self, angle):
newtop = []
for pt in self.top:
newtop.append( self.rotate_point(pt, angle) )
self.top = list(newtop)
newbottom = []
for pt in self.bottom:
newbottom.append( self.rotate_point(pt, angle) )
self.bottom = list(newbottom)
newlabels = []
for label in self.labels:
newpt = self.rotate_point( (label[0], label[1]), angle)
newlabels.append( (newpt[0], newpt[1], label[2], label[3] + angle, label[4]) )
self.labels = list(newlabels)
if len(self.saved_bounds) > 0:
newbounds = []
for pt in self.saved_bounds:
newbounds.append( self.rotate_point(pt, angle) )
self.saved_bounds = list(newbounds)
if self.poly != None:
self.poly.rotate(math.radians(angle), 0.0, 0.0)
def scale(self, hsize, vsize):
newtop = []
newbottom = []
newlabels = []
for pt in self.top:
newx = pt[0] * hsize
newy = pt[1] * vsize
newtop.append( (newx, newy) )
for pt in self.bottom:
newx = pt[0] * hsize
newy = pt[1] * vsize
newbottom.append( (newx, newy) )
for label in self.labels:
newx = ( label[0] * hsize )
newy = ( label[1] * vsize )
newlabels.append( (newx, newy, label[2], label[3], label[4]) )
|
Tony-Tsoi/proj-euler-ans
|
problems/prob044.py
|
Python
|
mit
| 1,103
| 0.012762
|
"""
Problem 44
Pentagonal numbers are generated by the formula, Pn = n*(3n−1)/2. The first ten pentagonal numbers are:
1, 5, 12, 22, 35, 51, 70, 92, 117, 145, ...
It can be seen that P4 + P7 = 22 + 70 = 92 = P8. However, their difference, 70 − 22 = 48, is not pentagonal.
Find the pair of pentagonal numbers, Pj and Pk, for which their sum and difference are pentagonal and D = |Pk − Pj| is minimised; what is the value of D?
"""
# brute force
def isPent(n, S):
"""
n: an int
S: a set
"""
    if len(S) == 0:
setSet(1, 100, S)
while S[-1] < 2 * n:
setSet(len(S)+1, len(S)+10, S)
return n in S
def setSet(i, j, S):
"""
i: an int
j: an int
S: a set
"""
for x in range(i, j):
Pn = int( x*(3*x-1)/2 )
S.append(Pn)
def fitsRule(Pj, Pk, S):
return isPent(Pj + Pk, S) and isPent(Pj - Pk, S)
T = []
setSet(1, 100, T)
diff = 99999999
for i in range(1, 5000):
for j in range(1, i):
if fitsRule(T[i], T[j], T) and (T[i] - T[j]) < diff:
diff = T[i] - T[j]
print('end.', diff)
|
wisechengyi/pants
|
src/python/pants/engine/build_files.py
|
Python
|
apache-2.0
| 12,398
| 0.003388
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os.path
from collections.abc import Mapping
from typing import Dict
from pants.base.exceptions import ResolveError
from pants.base.project_tree import Dir
from pants.base.specs import AddressSpec, AddressSpecs, SingleAddress, more_specific
from pants.build_graph.address import Address, BuildFileAddress
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.engine.addressable import (
AddressableDescriptor,
Addresses,
AddressesWithOrigins,
AddressWithOrigin,
BuildFileAddresses,
)
from pants.engine.fs import Digest, FilesContent, PathGlobs, Snapshot
from pants.engine.mapper import AddressFamily, AddressMap, AddressMapper
from pants.engine.objects import Locatable, SerializableFactory, Validatable
from pants.engine.parser import HydratedStruct
from pants.engine.rules import RootRule, rule
from pants.engine.selectors import Get, MultiGet
from pants.engine.struct import Struct
from pants.util.objects import TypeConstraintError
from pants.util.ordered_set import OrderedSet
class ResolvedTypeMismatchError(ResolveError):
"""Indicates a resolved object was not of the expected type."""
def _key_func(entry):
key, value = entry
return key
@rule
async def parse_address_family(address_mapper: AddressMapper, directory: Dir) -> AddressFamily:
"""Given an AddressMapper and a directory, return an AddressFamily.
The AddressFamily may be empty, but it will not be None.
"""
path_globs = PathGlobs(
globs=(
*(os.path.join(directory.path, p) for p in address_mapper.build_patterns),
*(f"!{p}" for p in address_mapper.build_ignore_patterns),
)
)
snapshot = await Get[Snapshot](PathGlobs, path_globs)
files_content = await Get[FilesContent](Digest, snapshot.directory_digest)
if not files_content:
raise ResolveError(
'Directory "{}" does not contain any BUILD files.'.format(directory.path)
)
address_maps = []
for filecontent_product in files_content:
address_maps.append(
AddressMap.parse(
filecontent_product.path, filecontent_product.content, address_mapper.parser
)
)
return AddressFamily.create(directory.path, address_maps)
def _raise_did_you_mean(address_family: AddressFamily, name: str, source=None) -> None:
names = [a.target_name for a in address_family.addressables]
possibilities = "\n ".join(":{}".format(target_name) for target_name in sorted(names))
resolve_error = ResolveError(
'"{}" was not found in namespace "{}". '
"Did you mean one of:\n {}".format(name, address_family.namespace, possibilities)
)
if source:
raise resolve_error from source
raise resolve_error
@rule
async def find_build_file(address: Address) -> BuildFileAddress:
address_family = await Get[AddressFamily](Dir(address.spec_path))
    if address not in address_family.addressables:
_raise_did_you_mean(address_family=address_family, name=address.target_name)
return next(
build_file_address
for build_file_address in address_family.addressables.keys()
if build_file_address == address
)
@rule
async def find_build_files(addresses: Addresses) -> BuildFileAddresses:
bfas = await MultiGet(Get[BuildFileAddress](Address, address) for address in addresses)
return BuildFileAddresses(bfas)
@rule
async def hydrate_struct(address_mapper: AddressMapper, address: Address) -> HydratedStruct:
"""Given an AddressMapper and an Address, resolve a Struct from a BUILD file.
Recursively collects any embedded addressables within the Struct, but will not walk into a
dependencies field, since those should be requested explicitly by rules.
"""
build_file_address = await Get[BuildFileAddress](Address, address)
address_family = await Get[AddressFamily](Dir(address.spec_path))
struct = address_family.addressables.get(build_file_address)
inline_dependencies = []
def maybe_append(outer_key, value):
if isinstance(value, str):
if outer_key != "dependencies":
inline_dependencies.append(
Address.parse(
value,
relative_to=address.spec_path,
subproject_roots=address_mapper.subproject_roots,
)
)
elif isinstance(value, Struct):
collect_inline_dependencies(value)
def collect_inline_dependencies(item):
for key, value in sorted(item._asdict().items(), key=_key_func):
if not AddressableDescriptor.is_addressable(item, key):
continue
if isinstance(value, Mapping):
for _, v in sorted(value.items(), key=_key_func):
maybe_append(key, v)
elif isinstance(value, (list, tuple)):
for v in value:
maybe_append(key, v)
else:
maybe_append(key, value)
# Recursively collect inline dependencies from the fields of the struct into `inline_dependencies`.
collect_inline_dependencies(struct)
# And then hydrate the inline dependencies.
hydrated_inline_dependencies = await MultiGet(
Get[HydratedStruct](Address, a) for a in inline_dependencies
)
dependencies = tuple(d.value for d in hydrated_inline_dependencies)
def maybe_consume(outer_key, value):
if isinstance(value, str):
if outer_key == "dependencies":
# Don't recurse into the dependencies field of a Struct, since those will be explicitly
# requested by tasks. But do ensure that their addresses are absolute, since we're
# about to lose the context in which they were declared.
value = Address.parse(
value,
relative_to=address.spec_path,
subproject_roots=address_mapper.subproject_roots,
)
else:
value = dependencies[maybe_consume.idx]
maybe_consume.idx += 1
elif isinstance(value, Struct):
value = consume_dependencies(value)
return value
# NB: Some pythons throw an UnboundLocalError for `idx` if it is a simple local variable.
# TODO(#8496): create a decorator for functions which declare a sentinel variable like this!
maybe_consume.idx = 0 # type: ignore[attr-defined]
# 'zip' the previously-requested dependencies back together as struct fields.
def consume_dependencies(item, args=None):
hydrated_args = args or {}
for key, value in sorted(item._asdict().items(), key=_key_func):
if not AddressableDescriptor.is_addressable(item, key):
hydrated_args[key] = value
continue
if isinstance(value, Mapping):
hydrated_args[key] = {
k: maybe_consume(key, v) for k, v in sorted(value.items(), key=_key_func)
}
elif isinstance(value, (list, tuple)):
hydrated_args[key] = tuple(maybe_consume(key, v) for v in value)
else:
hydrated_args[key] = maybe_consume(key, value)
return _hydrate(type(item), address.spec_path, **hydrated_args)
return HydratedStruct(consume_dependencies(struct, args={"address": address}))
def _hydrate(item_type, spec_path, **kwargs):
# If the item will be Locatable, inject the spec_path.
if issubclass(item_type, Locatable):
kwargs["spec_path"] = spec_path
try:
item = item_type(**kwargs)
except TypeConstraintError as e:
raise ResolvedTypeMismatchError(e)
# Let factories replace the hydrated object.
if isinstance(item, SerializableFactory):
item = item.create()
# Finally make sure objects that can self-validate get a chance to do so.
if isinstance(item, Validatable):
item.validate()
return
|
rogerhu/django
|
django/contrib/gis/geos/prepared.py
|
Python
|
bsd-3-clause
| 1,034
| 0
|
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.prototypes import prepared as capi
class PreparedGeometry(GEOSBase):
"""
A geometry that is prepared for performing certain operations.
    At the moment this includes the contains, covers, and intersects
operations.
"""
ptr_type = capi.PREPGEOM_PTR
def __init__(self, geom):
if not isinstance(geom, GEOSGeometry):
raise TypeError
self.ptr = capi.geos_prepare(geom.ptr)
def __del__(self):
if self._ptr:
capi.prepared_destroy(self._ptr)
def contains(self, other):
return capi.prepared_contains(self.ptr, other.ptr)
def contains_properly(self, other):
return capi.prepared_contains_properly(self.ptr, other.ptr)
def covers(self, other):
return capi.prepared_covers(self.ptr, other.ptr)
def intersects(self, other):
        return capi.prepared_intersects(self.ptr, other.ptr)
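# Usage sketch (illustrative, not part of Django): preparing a geometry pays off
# when the same polygon is tested against many other geometries.
if __name__ == "__main__":
    from django.contrib.gis.geos import Point, Polygon
    square = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)))
    prepared = PreparedGeometry(square)
    print(prepared.contains(Point(5, 5)))      # True
    print(prepared.intersects(Point(20, 20)))  # False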
|
nzsquirrell/p2pool-myriad
|
oldstuff/SOAPpy/version.py
|
Python
|
gpl-3.0
| 22
| 0.090909
|
__version__="0.12.5"
|
BirkbeckCTP/janeway
|
src/utils/management/commands/backup.py
|
Python
|
agpl-3.0
| 5,466
| 0.002012
|
import os
import shutil
import boto
from boto.s3.key import Key
import subprocess
from io import StringIO
from django.core.management.base import BaseCommand
from django.core.management import call_command
from django.conf import settings
from django.utils import timezone
from django.core.mail import send_mail
from core import models
def copy_file(source, destination):
"""
:param source: The source of the folder for copying
:param destination: The destination folder for the file
:return:
"""
destination_folder = os.path.join(settings.BASE_DIR, os.path.dirname(destination))
if not os.path.exists(destination_folder):
os.mkdir(destination_folder)
print("Copying {0}".format(source))
shutil.copy(os.path.join(settings.BASE_DIR, source),
os.path.join(settings.BASE_DIR, destination))
def copy_files(src_path, dest_path):
"""
    :param src_path: The source folder for copying
:param dest_path: The destination these files/folders should be copied to
:return: None
"""
if not os.path.exists(src_path):
os.makedirs(src_path)
files = os.listdir(src_path)
for file_name in files:
if not file_name == 'temp':
full_file_name = os.path.join(src_path, file_name)
print("Copying {0}".format(full_file_name)
|
)
if os.path.isfile(full_file_name):
shutil.copy(full_file_name, dest_path)
else:
dir_dest = os.path.join(dest_path, file_name)
if os.path.exists(dir_dest):
shutil.rmtree(os.path.join(dir_dest))
shutil.copytree(full_file_name, dir_dest)
def mycb(so_far, total):
print('{0} kb transferred out of {1}'.format(so_far / 1024, total / 1024))
def handle_s3(tmp_path, start_time):
print("Sending to S3.")
file_name = '{0}.zip'.format(start_time)
file_path = os.path.join(settings.BASE_DIR, 'files', 'temp', file_name)
f = open(file_path, 'rb')
END_POINT = settings.END_POINT
S3_HOST = settings.S3_HOST
UPLOADED_FILENAME = 'backups/{0}.zip'.format(start_time)
# include folders in file path. If it doesn't exist, it will be created
s3 = boto.s3.connect_to_region(END_POINT,
aws_access_key_id=settings.S3_ACCESS_KEY,
aws_secret_access_key=settings.S3_SECRET_KEY,
host=S3_HOST)
bucket = s3.get_bucket(settings.S3_BUCKET_NAME)
k = Key(bucket)
k.key = UPLOADED_FILENAME
k.set_contents_from_file(f, cb=mycb, num_cb=200)
def handle_directory(tmp_path, start_time):
print("Copying to backup dir")
file_name = '{0}.zip'.format(start_time)
copy_file('files/temp/{0}'.format(file_name), settings.BACKUP_DIR)
def delete_used_tmp(tmp_path, start_time):
print("Deleting temp directory.")
shutil.rmtree(tmp_path)
file_path = "{0}/{1}.zip".format(os.path.join(settings.BASE_DIR, 'files', 'temp'), start_time)
os.unlink(file_path)
def send_email(start_time, e, success=False):
admins = models.Account.objects.filter(is_superuser=True)
message = ''
if not success:
message = 'There was an error during the backup process.\n\n '
send_mail(
'Backup',
'{0}{1}.'.format(message, e),
'backup@janeway',
[user.email for user in admins],
fail_silently=False,
)
class Command(BaseCommand):
"""
Pulls files together then sends them to aws bucket.
"""
help = "Deletes duplicate settings."
def handle(self, *args, **options):
"""Does a backup..
:param args: None
:param options: None
:return: None
"""
# Ensure temp dir exists:
if not os.path.exists(os.path.join(settings.BASE_DIR, 'files', 'temp')):
os.makedirs(os.path.join(settings.BASE_DIR, 'files', 'temp'))
start_time = str(timezone.now())
try:
tmp_path = os.path.join(settings.BASE_DIR, 'files', 'temp', start_time)
# dump database out to JSON and store in StringIO for saving
print('Dumping json db file')
json_out = StringIO()
call_command('dumpdata', '--indent=4', '--natural-foreign', '--exclude=contenttypes', stdout=json_out)
write_path = os.path.join(settings.BASE_DIR, 'files', 'temp', 'janeway.json')
with open(write_path, 'w', encoding="utf-8") as write:
json_out.seek(0)
shutil.copyfileobj(json_out, write)
os.mkdir(tmp_path)
copy_file('files/temp/janeway.json', 'files/temp/{0}/janeway.json'.format(start_time))
copy_files(os.path.join(settings.BASE_DIR, 'media'), os.path.join(tmp_path, 'media'))
copy_files(os.path.join(settings.BASE_DIR, 'files'), os.path.join(tmp_path, 'files'))
print("Creating archive.")
shutil.make_archive(os.path.join(settings.BASE_DIR, 'files', 'temp', start_time), 'zip', tmp_path)
if settings.BACKUP_TYPE == 's3':
handle_s3(tmp_path, start_time)
else:
handle_directory(tmp_path, start_time)
delete_used_tmp(tmp_path, start_time)
if settings.BACKUP_EMAIL:
send_email(start_time, 'Backup was successfully completed.')
except Exception as e:
send_email(start_time, e)
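# Invocation sketch (illustrative): as a Django management command this runs as
#
#   python manage.py backup
#
# and chooses between an S3 upload and a plain directory copy based on
# settings.BACKUP_TYPE; superusers are mailed on failure (and, when
# settings.BACKUP_EMAIL is set, on success too).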
|
plumJ/catsup
|
config.py
|
Python
|
mit
| 1,211
| 0.012386
|
# -*- coding:utf-8 -*-
import os
site_title = 'plum.J'
site_description = '\'s blog'
site_url = 'http://plumj.com'
static_url = 'static'
theme_name = 'sealscript'
google_analytics = ''
catsup_path = os.path.dirname(__file__)
posts_path = os.path.join(catsup_path, '_posts')
theme_path = os.path.join(catsup_path, 'themes', theme_name)
common_template_path = os.path.join(catsup_path, 'template')
deploy_path = os.path.join(catsup_path, 'deploy')
twitter = '_plumJ'
weibo = 'dobbyfree'
github = 'plumJ'
disqus_shortname = 'catsup'
feed = 'feed.xml'
post_per_page = 3
links = (
('Leonbb', 'http://leonbb.com', "Leonbb's Blog"),
)
if site_url.endswith('/'):
site_url = site_url[:-1]
if static_url.endswith('/'):
static_url = static_url[:-1]
settings = dict(static_path=os.path.join(theme_path, 'static'),
template_path=os.path.join(theme_path, 'template'),
gzip=True,
site_title=site_title,
site_description=site_description,
site_url=site_url,
twitter=twitter,
weibo=weibo,
github=github,
feed=feed,
post_per_page=post_per_page,
disqus_shortname=disqus_shortname,
links=links,
static_url=static_url,
google_analytics=google_analytics,
)
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/application_gateway_ssl_predefined_policy.py
|
Python
|
mit
| 1,826
| 0.000548
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewaySslPredefinedPolicy(SubResource):
"""An Ssl predefined policy.
:param id: Resource ID.
:type id: str
:param name: Name of Ssl predefined policy.
:type name: str
:param cipher_suites: Ssl cipher suites to be enabled in the specified
order for application gateway.
:type cipher_suites: list[str or
~azure.mgmt.network.v2017_10_01.models.ApplicationGatewaySslCipherSuite]
:param min_protocol_version: Minimum version of Ssl protocol to be
supported on application gateway. Possible values include: 'TLSv1_0',
'TLSv1_1', 'TLSv1_2'
    :type min_protocol_version: str or
~azure.mgmt.network.v2017_10_01.models.ApplicationGatewaySslProtocol
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'cipher_suites': {'key': 'properties.cipherSuites', 'type': '[str]'},
'min_protocol_version': {'key': 'properties.minProtocolVersion', 'type': 'str'},
}
def __init__(self, **kwargs):
        super(ApplicationGatewaySslPredefinedPolicy, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.cipher_suites = kwargs.get('cipher_suites', None)
self.min_protocol_version = kwargs.get('min_protocol_version', None)
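# Usage sketch (the values are illustrative): the generated model is built from
# keyword arguments that mirror the REST payload fields.
if __name__ == "__main__":
    policy = ApplicationGatewaySslPredefinedPolicy(
        name='AppGwSslPolicy20170401',
        cipher_suites=['TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384'],
        min_protocol_version='TLSv1_1',
    )
    print(policy.name, policy.min_protocol_version)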
|
wbsavage/shinken
|
shinken/misc/datamanagerhostd.py
|
Python
|
agpl-3.0
| 5,441
| 0.000919
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
from shinken.util import safe_print
from shinken.misc.datamanagerskonf import DataManagerSKonf
class FakeRegenerator(object):
def __init__(self):
return
class DataManagerHostd(DataManagerSKonf):
def get_generics(self, table, key):
r = []
for i in self.get_all_in_db(table):
r.append(i)
return r
def get_packs(self):
return self.get_generics('packs', 'pack_name')
# Get a specific object
def get_pack(self, pname):
r = self.get_in_db('packs', 'pack_name', pname)
return r
def get_pack_by_id(self, pid):
r = self.get_in_db('packs', '_id', pid)
return r
def get_pack_by_user_packname(self, username, packname):
value = '%s-%s' % (username, packname)
r = self.get_in_db('packs', 'link_id', value)
return r
def build_pack_tree(self, packs):
# dirname sons packs
t = ('', [], [])
for p in packs:
path = p.get('path', '/')
dirs = path.split('/')
dirs = [d for d in dirs if d != '']
pos = t
for d in dirs:
print "In the level", d, " and the context", pos
sons = pos[1]
print "Get the sons to add me", sons
if not d in [s[0] for s in sons]:
print "Add a new level"
print "Get the sons to add me", sons
node = (d, [], [])
sons.append(node)
# Ok now search the node for d and take it as our new position
for s in sons:
if s[0] == d:
print "We found our new position", s
pos = s
# Now add our pack to this entry
print "Add pack to the level", pos[0]
pos[2].append(p)
print "The whole pack tree", t
return t
def get_pack_tree(self):
packs = self.get_packs()
packs = [p for p in packs if p['state'] in ['ok', 'pending']]
print "GOT IN DB PACKS", packs
t = self.build_pack_tree(packs)
r = self._get_pack_tree(t)
print "RETURN WHOLE PACK TREE", r
return r
def _get_pack_tree(self, tree):
print "__get_pack_tree:: for", tree
name = tree[0]
sons = tree[1]
packs = tree[2]
# Sort our sons by they names
def _sort(e1, e2):
if e1[0] < e2[0]:
return -1
if e1[0] > e2[0]:
return 1
return 0
sons.sort(_sort)
res = []
if name != '':
res.append({'type': 'new_tree', 'name': name})
for p in packs:
res.append({'type': 'pack', 'pack': p})
for s in sons:
r = self._get_pack_tree(s)
res.extend(r)
if name != '':
res.append({'type': 'end_tree', 'name': name})
print "RETURN PARTIAL", res
return res
# We got a pack name, we look for all objects, and search where this
# host template name is used
def related_to_pack(self, pack):
name = pack.get('pack_name', 'unknown').strip()
print "TRY TO MATCH PACK", name
res = []
for tname in pack.get('templates', []):
print "Try to find a sub template of a pack", tname
tname = tname.strip()
# First try to match the host template
tpl = None
for h in self.get_hosts():
print "Try to match pack with", h, name, h.get('register', '1') == '0', h.get('name', '') == name
if h.get('register', '1') == '0' and h.get('name', '') == tname:
print "MATCH FOUND for", tname
tpl = h
break
if not tpl:
continue
print "And now the services of this pack template", tname
services = []
for s in self.get_services():
# I want only the templates
if s.get('register', '1') != '0':
continue
use = s.get('host_name', '')
elts = use.split(',')
elts = [e.strip() for e in elts]
if tname in elts:
print "FOUND A
|
SERVICE THAT MA5TCH", s.get('service_description', '')
|
services.append(s)
res.append((tpl, services))
return res
datamgr = DataManagerHostd()
|
brownnrl/moneyguru
|
support/genchangelog.py
|
Python
|
gpl-3.0
| 2,089
| 0.005744
|
#!/usr/bin/env python3
import sys
import datetime
import re
CHANGELOG_FORMAT = """
{version} ({date})
----------------------
{description}
"""
TIXURL = "https://github.com/hsoft/moneyguru/issues/{}"
def tixgen(tixurl):
"""This is a filter *generator*. tixurl is a url pattern for the tix with a {0} placeholder
for the tix #
"""
    urlpattern = tixurl.format('\\1')  # will be replaced by the content of the first group in re
R = re.compile(r'#(\d+)')
repl = '`#\\1 <{}>`__'.format(urlpattern)
return lambda text: R.sub(repl, text)
re_changelog_header = re.compile(r'=== ([\d.b]*) \(([\d\-]*)\)')
def read_changelog_file(filename):
    def iter_by_three(it):
        while True:
            try:
                version = next(it)
            except StopIteration:
                return
            date = next(it)
            description = next(it)
            yield version, date, description
with open(filename, 'rt', encoding='utf-8') as fp:
contents = fp.read()
splitted = re_changelog_header.split(contents)[1:] # the first item is empty
# splitted = [version1, date1, desc1, version2, date2, ...]
result = []
for version, date_str, description in iter_by_three(iter(splitted)):
date = datetime.datetime.strptime(date_str, '%Y-%m-%d').date()
        d = {'date': date, 'date_str': date_str, 'version': version, 'description': description.strip()}
result.append(d)
return result
def changelog_to_rst(changelogpath):
    changelog = read_changelog_file(changelogpath)
tix = tixgen(TIXURL)
for log in changelog:
description = tix(log['description'])
        # The format of the changelog descriptions is markdown, but since we only use bulleted
        # lists and links, it's not worth depending on the markdown package. A simple regexp suffices.
description = re.sub(r'\[(.*?)\]\((.*?)\)', '`\\1 <\\2>`__', description)
rendered = CHANGELOG_FORMAT.format(
version=log['version'],
date=log['date_str'],
description=description)
print(rendered)
if __name__ == '__main__':
changelog_to_rst(sys.argv[1])
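# Input/invocation sketch (the changelog path is illustrative): the input is
# expected as blocks introduced by "=== <version> (<YYYY-MM-DD>)" headers, e.g.
#
#   === 2.9.2 (2014-05-01)
#   * Fixed a crash when saving (#123)
#
# and `python genchangelog.py path/to/changelog` prints the ReST rendering, with
# "#123" rewritten into a GitHub issue link by tixgen().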
|
Juniper/neutron
|
neutron/db/migration/cli.py
|
Python
|
apache-2.0
| 4,441
| 0.000225
|
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import os
from alembic import command as alembic_command
from alembic import config as alembic_config
from alembic import util as alembic_util
from oslo.config import cfg
from neutron.common import legacy
_core_opts = [
cfg.StrOpt('core_plugin',
default='',
help=_('Neutron plugin provider module')),
cfg.ListOpt('service_plugins',
default=[],
help=_("The service plugins Neutron will use")),
]
_quota_opts = [
cfg.StrOpt('quota_driver',
default='',
help=_('Neutron quota driver class')),
]
_db_opts = [
cfg.StrOpt('connection',
deprecated_name='sql_connection',
default='',
help=_('URL to database')),
]
CONF = cfg.ConfigOpts()
CONF.register_opts(_core_opts)
CONF.register_opts(_db_opts, 'database')
CONF.register_opts(_quota_opts, 'QUOTAS')
def do_alembic_command(config, cmd, *args, **kwargs):
try:
getattr(alembic_command, cmd)(config, *args, **kwargs)
except alembic_util.CommandError as e:
alembic_util.err(str(e))
def do_check_migration(config, cmd):
do_alembic_command(config, 'branches')
def do_upgrade_downgrade(config, cmd):
if not CONF.command.revision and not CONF.command.delta:
raise SystemExit(_('You must provide a revision or relative delta'))
revision = CONF.command.revision
if CONF.command.delta:
sign = '+' if CONF.command.name == 'upgrade' else '-'
revision = sign + str(CONF.command.delta)
else:
revision = CONF.command.revision
do_alembic_command(config, cmd, revision, sql=CONF.command.sql)
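# For example (illustrative): "upgrade --delta 2" passes "+2" to alembic and
# "downgrade --delta 1" passes "-1"; an explicit revision argument is passed through unchanged.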
def do_stamp(config, cmd):
do_alembic_command(config, cmd,
CONF.command.revision,
sql=CONF.command.sql)
def do_revision(config, cmd):
do_alembic_command(config, cmd,
message=CONF.command.message,
autogenerate=CONF.command.autogenerate,
sql=CONF.command.sql)
def add_command_parsers(subparsers):
for name in ['current', 'history', 'branches']:
parser = subparsers.add_parser(name)
parser.set_defaults(func=do_alembic_command)
parser = subparsers.add_parser('check_migration')
parser.set_defaults(func=do_check_migration)
for name in ['upgrade', 'downgrade']:
parser = subparsers.add_parser(name)
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser.set_defaults(func=do_upgrade_downgrade)
parser = subparsers.add_parser('stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser.set_defaults(func=do_stamp)
parser = subparsers.add_parser('revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
parser.set_defaults(func=do_revision)
command_opt = cfg.SubCommandOpt('command',
title='Command',
help=_('Available commands'),
handler=add_command_parsers)
CONF.register_cli_opt(command_opt)
def main():
config = ale
|
mbic_config.Config(
os.path.join(os.path.dirname(__file__),
|
'alembic.ini')
)
config.set_main_option('script_location',
'neutron.db.migration:alembic_migrations')
# attach the Neutron conf to the Alembic conf
config.neutron_config = CONF
CONF()
#TODO(gongysh) enable logging
legacy.modernize_quantum_config(CONF)
CONF.command.func(config, CONF.command.name)
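# Illustrative invocation (assuming the usual neutron-db-manage console entry point for main()):
#   neutron-db-manage --config-file /etc/neutron/neutron.conf upgrade head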
|
quantumlib/ReCirq
|
recirq/quantum_chess/bit_utils.py
|
Python
|
apache-2.0
| 2,765
| 0
|
# Copyright 2020 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for converting to and from bit boards.
"""
from typing import List
import cirq
import recirq.quantum_chess.move as move
def nth_bit_of(n: int, bit_board: int) -> bool:
"""Returns the n-th bit of a 64-bit chess bitstring"""
return (bit_board >> n) % 2 == 1
def set_nth_bit(n: int, bit_board: int, val: bool) -> int:
"""Sets the nth bit of the bitstring to a specific value."""
return bit_board - (nth_bit_of(n, bit_board) << n) + (int(val) << n)
def bit_to_qubit(n: int) -> cirq.Qid:
"""Turns a number into a cirq Qubit."""
return cirq.NamedQubit(bit_to_square(n))
def num_ones(n: int) -> int:
"""Number of ones in the binary representation of n."""
count = 0
while n > 0:
if n % 2 == 1:
count += 1
n = n // 2
return count
def bit_ones(n: int) -> List[int]:
"""Indices of ones in the binary representation of n."""
indices = []
index = 0
while n > 0:
if n % 2 == 1:
indices.append(index)
n >>= 1
index += 1
return indices
def qubit_to_bit(q: cirq.LineQubit) -> int:
"""Retrieves the number from a cirq Qubit's name.
Does not work for ancilla Qubits.
"""
return square_to_bit(q.name)
def xy_to_bit(x: int, y: int) -> int:
"""Transform x/y coordinates into a bitboard bit number."""
return y * 8 + x
def square_to_bi
|
t(sq: str) -> int:
"""Transform algebraic square notation into a bitboard bit number."""
return move.y_of(sq) * 8 + move.x_of(sq)
def bit_to_square(bit: int) -> str:
"""Transform a bitboard bit number into algebraic square notation."""
|
return move.to_square(bit % 8, bit // 8)
def squares_to_bitboard(squares: List[str]) -> int:
"""Transform a list of algebraic squares into a 64-bit board bitstring."""
bitboard = 0
for sq in squares:
bitboard += 1 << square_to_bit(sq)
return bitboard
def bitboard_to_squares(bitboard: int) -> List[str]:
"""Transform a 64-bit bitstring into a list of algebraic squares."""
squares = []
for n in range(64):
if nth_bit_of(n, bitboard):
squares.append(bit_to_square(n))
return squares
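# Illustrative round trip (assuming move.x_of/move.y_of map 'a1' to x=0, y=0):
#   squares_to_bitboard(['a1', 'b1']) == 0b11
#   bitboard_to_squares(0b11) == ['a1', 'b1']
#   set_nth_bit(3, 0b0001, True) == 0b1001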
|
bigfootproject/OSMEF
|
data_processing/overview.py
|
Python
|
apache-2.0
| 3,112
| 0.010604
|
#!/usr/bin/python
import json
import os
import sys
if not os.access("data.json", os.F_OK):
print("Please run aggregate.py first")
sys.exit(1)
data = json.load(open("data.json", "r"))
print("Results for 1 connection")
print("{:<20s} {:>8s} | {:>7s} | {:>5s} | {:>5s} | {:>5s} | {:>5s}".format("name",
"btc avg",
"btc std",
"cpuRX",
"std",
"cpuTX",
"std"))
for name in sorted(data):
print("{:<20s} {:>8.2f} | {:>7.2f} | {:>5.2f} | {:>5.2f} | {:>5.2f} | {:>5.2f}".format(name,
data[name]["c=1"]["btc"]["rx"]["rate_KBps"]["avg"],
data[name]["c=1"]["conn_1"]["btc"]["rx"]["rate_KBps"]["std"],
data[name]["c=1"]["cpu"]["rx"]["avg"],
data[name]["c=1"]["conn_1"]["cpu"]["rx"]["std"],
data[name]["c=1"]["cpu"]["tx"]["avg"],
data[name]["c=1"]["conn_1"]["cpu"]["tx"]["std"],
))
print("\nResults for 30 connections")
print("{:<20s} {:>8s} | {:>8s} | {:>7s} | {:>5s} | {:>5s} | {:>5s} | {:>5s}".format("name",
"btc sum",
"btc avg",
"btc std",
"cpuRX",
"std",
"cpuTX",
|
"std"))
for name
|
in sorted(data):
print("{:<20s} {:>8.2f} | {:>8.2f} | {:>7.2f} | {:>5.2f} | {:>5.2f} | {:>5.2f} | {:>5.2f}".format(name,
data[name]["c=30"]["btc"]["rx"]["rate_KBps"]["sum"],
data[name]["c=30"]["btc"]["rx"]["rate_KBps"]["avg"],
data[name]["c=30"]["btc"]["rx"]["rate_KBps"]["std"],
data[name]["c=30"]["cpu"]["rx"]["avg"],
data[name]["c=30"]["cpu"]["rx"]["std"],
data[name]["c=30"]["cpu"]["tx"]["avg"],
data[name]["c=30"]["cpu"]["tx"]["std"],
))
|
piksels-and-lines-orchestra/inkscape
|
share/extensions/fig2dev-ext.py
|
Python
|
gpl-2.0
| 1,025
| 0.001951
|
#!/usr/bin/env python
"""
fig2dev-ext.py
Python script for running fig2dev in Inkscape extensions
Copyright (C) 2008 Stephen Silver
This program is free software; you can redistribute it and/or modify
it under the t
|
erms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public Li
|
cense for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import sys
from run_command import run
run('fig2dev -L svg "%s" "%%s"' % sys.argv[1].replace("%","%%"), "fig2dev")
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
|
cypherai/PySyft
|
syft/__init__.py
|
Python
|
apache-2.0
| 42
| 0
|
from
|
.tensor import *
from .math
|
import *
|
wangg12/caffe
|
scripts/download_model_binary.py
|
Python
|
bsd-2-clause
| 2,496
| 0.000401
|
#!/usr/bin/env python
import os
import sys
import time
import yaml
import urllib
import hashlib
import argparse
required_keys = ['caffemodel', 'caffemodel_url', 'sha1']
def reporthook(count, block_size, total_size):
"""
From http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/
"""
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = int(count * block_size * 100 / total_size)
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
(percent, progress_size / (1024 * 1024), speed, duration))
sys.stdout.flush()
def parse_readme_frontmatter(dirname):
readme_filename = os.path.join(dirname, 'readme.md')
with open(readme_filename) as f:
lines = [line.strip() for line in f.readlines()]
top = lines.index('---')
bottom = lines.index('---', top + 1)
frontmatter = yaml.load('\n'.join(lines[top + 1:bottom]))
assert all(key in frontmatter for key in required_keys)
return dirname, frontmatter
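# The readme.md frontmatter parsed above is expected to look roughly like this
# (illustrative values only):
#
#   ---
#   name: Example Model
#   caffemodel: example.caffemodel
#   caffemodel_url: http://example.com/example.caffemodel
#   sha1: 0123456789abcdef0123456789abcdef01234567
#   ---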
def valid_dirname(dirname):
try:
return parse_readme_frontmatter(dirname)
except Exception as e:
print('ERROR: {}'.format(e))
raise argparse.ArgumentTypeError(
'Must be valid Caffe model directory with a correct readme.md')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Download trained model binary.')
parser.add_argument('dirname', type=valid_dirname)
args = parser.parse_args()
# A tiny hack: the dirname validator also returns readme YAML frontmatter.
dirname = args.dirname[0]
|
frontmatter = args.dirname[1]
model_filename = os.path.join(dirname, frontmatter['caffemodel'])
# Closure-d function for checking SHA1.
def model_checks_out(filename=model_filename, sha1=frontmatter['sha1']):
with open(filename, 'r') as f:
return hashlib.sha1(f.r
|
ead()).hexdigest() == sha1
# Check if model exists.
if os.path.exists(model_filename) and model_checks_out():
print("Model already exists.")
sys.exit(0)
# Download and verify model.
urllib.urlretrieve(
frontmatter['caffemodel_url'], model_filename, reporthook)
if not model_checks_out():
print('ERROR: model did not download correctly! Run this again.')
sys.exit(1)
|
robertding/vo
|
vo/dl/__init__.py
|
Python
|
mit
| 242
| 0.004132
|
#!/usr/bin/
|
env python
# -*- coding:utf-8 -*-
#
# Author : RobertDing
# E-mail : robertdingx@gmail.com
# Date : 15/08/29 02:24:28
# Desc : fetch video
#
from __future__ import absolute_import, divisi
|
on, with_statement
|